-rw-r--r--Documentation/DocBook/Makefile7
-rw-r--r--Documentation/DocBook/procfs_example.c4
-rw-r--r--Documentation/Makefile3
-rw-r--r--Documentation/accounting/Makefile10
-rw-r--r--Documentation/accounting/getdelays.c25
-rw-r--r--Documentation/arm/Samsung-S3C24XX/GPIO.txt15
-rw-r--r--Documentation/arm/Samsung-S3C24XX/Overview.txt35
-rw-r--r--Documentation/auxdisplay/Makefile10
-rw-r--r--Documentation/connector/Makefile11
-rw-r--r--Documentation/cpu-hotplug.txt5
-rw-r--r--Documentation/devices.txt3
-rw-r--r--Documentation/feature-removal-schedule.txt9
-rw-r--r--Documentation/filesystems/configfs/Makefile3
-rw-r--r--Documentation/filesystems/quota.txt22
-rw-r--r--Documentation/filesystems/ubifs.txt2
-rw-r--r--Documentation/hwmon/ibmaem33
-rw-r--r--Documentation/ia64/Makefile8
-rw-r--r--Documentation/ioctl-number.txt1
-rw-r--r--Documentation/lguest/lguest.c23
-rw-r--r--Documentation/networking/Makefile8
-rw-r--r--Documentation/networking/ifenslave.c2
-rw-r--r--Documentation/pcmcia/Makefile10
-rw-r--r--Documentation/pcmcia/crc32hash.c2
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt10
-rw-r--r--Documentation/spi/Makefile11
-rw-r--r--Documentation/usb/auerswald.txt30
-rw-r--r--Documentation/usb/power-management.txt7
-rw-r--r--Documentation/video4linux/Makefile8
-rw-r--r--Documentation/vm/Makefile8
-rw-r--r--Documentation/vm/page_migration9
-rw-r--r--Documentation/watchdog/src/Makefile8
-rw-r--r--MAINTAINERS35
-rw-r--r--Makefile7
-rw-r--r--arch/alpha/include/asm/8253pit.h (renamed from include/asm-alpha/8253pit.h)0
-rw-r--r--arch/alpha/include/asm/Kbuild (renamed from include/asm-alpha/Kbuild)0
-rw-r--r--arch/alpha/include/asm/a.out-core.h (renamed from include/asm-alpha/a.out-core.h)0
-rw-r--r--arch/alpha/include/asm/a.out.h (renamed from include/asm-alpha/a.out.h)0
-rw-r--r--arch/alpha/include/asm/agp.h (renamed from include/asm-alpha/agp.h)0
-rw-r--r--arch/alpha/include/asm/agp_backend.h (renamed from include/asm-alpha/agp_backend.h)0
-rw-r--r--arch/alpha/include/asm/atomic.h (renamed from include/asm-alpha/atomic.h)0
-rw-r--r--arch/alpha/include/asm/auxvec.h (renamed from include/asm-alpha/auxvec.h)0
-rw-r--r--arch/alpha/include/asm/barrier.h (renamed from include/asm-alpha/barrier.h)0
-rw-r--r--arch/alpha/include/asm/bitops.h (renamed from include/asm-alpha/bitops.h)0
-rw-r--r--arch/alpha/include/asm/bug.h (renamed from include/asm-alpha/bug.h)0
-rw-r--r--arch/alpha/include/asm/bugs.h (renamed from include/asm-alpha/bugs.h)0
-rw-r--r--arch/alpha/include/asm/byteorder.h (renamed from include/asm-alpha/byteorder.h)0
-rw-r--r--arch/alpha/include/asm/cache.h (renamed from include/asm-alpha/cache.h)0
-rw-r--r--arch/alpha/include/asm/cacheflush.h (renamed from include/asm-alpha/cacheflush.h)0
-rw-r--r--arch/alpha/include/asm/checksum.h (renamed from include/asm-alpha/checksum.h)0
-rw-r--r--arch/alpha/include/asm/compiler.h (renamed from include/asm-alpha/compiler.h)0
-rw-r--r--arch/alpha/include/asm/console.h (renamed from include/asm-alpha/console.h)0
-rw-r--r--arch/alpha/include/asm/core_apecs.h (renamed from include/asm-alpha/core_apecs.h)0
-rw-r--r--arch/alpha/include/asm/core_cia.h (renamed from include/asm-alpha/core_cia.h)0
-rw-r--r--arch/alpha/include/asm/core_irongate.h (renamed from include/asm-alpha/core_irongate.h)0
-rw-r--r--arch/alpha/include/asm/core_lca.h (renamed from include/asm-alpha/core_lca.h)0
-rw-r--r--arch/alpha/include/asm/core_marvel.h (renamed from include/asm-alpha/core_marvel.h)0
-rw-r--r--arch/alpha/include/asm/core_mcpcia.h (renamed from include/asm-alpha/core_mcpcia.h)0
-rw-r--r--arch/alpha/include/asm/core_polaris.h (renamed from include/asm-alpha/core_polaris.h)0
-rw-r--r--arch/alpha/include/asm/core_t2.h (renamed from include/asm-alpha/core_t2.h)0
-rw-r--r--arch/alpha/include/asm/core_titan.h (renamed from include/asm-alpha/core_titan.h)0
-rw-r--r--arch/alpha/include/asm/core_tsunami.h (renamed from include/asm-alpha/core_tsunami.h)0
-rw-r--r--arch/alpha/include/asm/core_wildfire.h (renamed from include/asm-alpha/core_wildfire.h)0
-rw-r--r--arch/alpha/include/asm/cputime.h (renamed from include/asm-alpha/cputime.h)0
-rw-r--r--arch/alpha/include/asm/current.h (renamed from include/asm-alpha/current.h)0
-rw-r--r--arch/alpha/include/asm/delay.h (renamed from include/asm-alpha/delay.h)0
-rw-r--r--arch/alpha/include/asm/device.h (renamed from include/asm-alpha/device.h)0
-rw-r--r--arch/alpha/include/asm/div64.h (renamed from include/asm-alpha/div64.h)0
-rw-r--r--arch/alpha/include/asm/dma-mapping.h (renamed from include/asm-alpha/dma-mapping.h)0
-rw-r--r--arch/alpha/include/asm/dma.h (renamed from include/asm-alpha/dma.h)0
-rw-r--r--arch/alpha/include/asm/elf.h (renamed from include/asm-alpha/elf.h)0
-rw-r--r--arch/alpha/include/asm/emergency-restart.h (renamed from include/asm-alpha/emergency-restart.h)0
-rw-r--r--arch/alpha/include/asm/err_common.h (renamed from include/asm-alpha/err_common.h)0
-rw-r--r--arch/alpha/include/asm/err_ev6.h (renamed from include/asm-alpha/err_ev6.h)0
-rw-r--r--arch/alpha/include/asm/err_ev7.h (renamed from include/asm-alpha/err_ev7.h)0
-rw-r--r--arch/alpha/include/asm/errno.h (renamed from include/asm-alpha/errno.h)0
-rw-r--r--arch/alpha/include/asm/fb.h (renamed from include/asm-alpha/fb.h)0
-rw-r--r--arch/alpha/include/asm/fcntl.h (renamed from include/asm-alpha/fcntl.h)0
-rw-r--r--arch/alpha/include/asm/floppy.h (renamed from include/asm-alpha/floppy.h)0
-rw-r--r--arch/alpha/include/asm/fpu.h (renamed from include/asm-alpha/fpu.h)0
-rw-r--r--arch/alpha/include/asm/futex.h (renamed from include/asm-alpha/futex.h)0
-rw-r--r--arch/alpha/include/asm/gct.h (renamed from include/asm-alpha/gct.h)0
-rw-r--r--arch/alpha/include/asm/gentrap.h (renamed from include/asm-alpha/gentrap.h)0
-rw-r--r--arch/alpha/include/asm/hardirq.h (renamed from include/asm-alpha/hardirq.h)0
-rw-r--r--arch/alpha/include/asm/hw_irq.h (renamed from include/asm-alpha/hw_irq.h)0
-rw-r--r--arch/alpha/include/asm/hwrpb.h (renamed from include/asm-alpha/hwrpb.h)0
-rw-r--r--arch/alpha/include/asm/io.h (renamed from include/asm-alpha/io.h)0
-rw-r--r--arch/alpha/include/asm/io_trivial.h (renamed from include/asm-alpha/io_trivial.h)0
-rw-r--r--arch/alpha/include/asm/ioctl.h (renamed from include/asm-alpha/ioctl.h)0
-rw-r--r--arch/alpha/include/asm/ioctls.h (renamed from include/asm-alpha/ioctls.h)0
-rw-r--r--arch/alpha/include/asm/ipcbuf.h (renamed from include/asm-alpha/ipcbuf.h)0
-rw-r--r--arch/alpha/include/asm/irq.h (renamed from include/asm-alpha/irq.h)0
-rw-r--r--arch/alpha/include/asm/irq_regs.h (renamed from include/asm-alpha/irq_regs.h)0
-rw-r--r--arch/alpha/include/asm/jensen.h (renamed from include/asm-alpha/jensen.h)0
-rw-r--r--arch/alpha/include/asm/kdebug.h (renamed from include/asm-alpha/kdebug.h)0
-rw-r--r--arch/alpha/include/asm/kmap_types.h (renamed from include/asm-alpha/kmap_types.h)0
-rw-r--r--arch/alpha/include/asm/linkage.h (renamed from include/asm-alpha/linkage.h)0
-rw-r--r--arch/alpha/include/asm/local.h (renamed from include/asm-alpha/local.h)0
-rw-r--r--arch/alpha/include/asm/machvec.h (renamed from include/asm-alpha/machvec.h)0
-rw-r--r--arch/alpha/include/asm/mc146818rtc.h (renamed from include/asm-alpha/mc146818rtc.h)0
-rw-r--r--arch/alpha/include/asm/md.h (renamed from include/asm-alpha/md.h)0
-rw-r--r--arch/alpha/include/asm/mman.h (renamed from include/asm-alpha/mman.h)0
-rw-r--r--arch/alpha/include/asm/mmu.h (renamed from include/asm-alpha/mmu.h)0
-rw-r--r--arch/alpha/include/asm/mmu_context.h (renamed from include/asm-alpha/mmu_context.h)0
-rw-r--r--arch/alpha/include/asm/mmzone.h (renamed from include/asm-alpha/mmzone.h)0
-rw-r--r--arch/alpha/include/asm/module.h (renamed from include/asm-alpha/module.h)0
-rw-r--r--arch/alpha/include/asm/msgbuf.h (renamed from include/asm-alpha/msgbuf.h)0
-rw-r--r--arch/alpha/include/asm/mutex.h (renamed from include/asm-alpha/mutex.h)0
-rw-r--r--arch/alpha/include/asm/page.h (renamed from include/asm-alpha/page.h)0
-rw-r--r--arch/alpha/include/asm/pal.h (renamed from include/asm-alpha/pal.h)0
-rw-r--r--arch/alpha/include/asm/param.h (renamed from include/asm-alpha/param.h)0
-rw-r--r--arch/alpha/include/asm/parport.h (renamed from include/asm-alpha/parport.h)0
-rw-r--r--arch/alpha/include/asm/pci.h (renamed from include/asm-alpha/pci.h)0
-rw-r--r--arch/alpha/include/asm/percpu.h (renamed from include/asm-alpha/percpu.h)0
-rw-r--r--arch/alpha/include/asm/pgalloc.h (renamed from include/asm-alpha/pgalloc.h)0
-rw-r--r--arch/alpha/include/asm/pgtable.h (renamed from include/asm-alpha/pgtable.h)0
-rw-r--r--arch/alpha/include/asm/poll.h (renamed from include/asm-alpha/poll.h)0
-rw-r--r--arch/alpha/include/asm/posix_types.h (renamed from include/asm-alpha/posix_types.h)0
-rw-r--r--arch/alpha/include/asm/processor.h (renamed from include/asm-alpha/processor.h)0
-rw-r--r--arch/alpha/include/asm/ptrace.h (renamed from include/asm-alpha/ptrace.h)0
-rw-r--r--arch/alpha/include/asm/reg.h (renamed from include/asm-alpha/reg.h)0
-rw-r--r--arch/alpha/include/asm/regdef.h (renamed from include/asm-alpha/regdef.h)0
-rw-r--r--arch/alpha/include/asm/resource.h (renamed from include/asm-alpha/resource.h)0
-rw-r--r--arch/alpha/include/asm/rtc.h (renamed from include/asm-alpha/rtc.h)0
-rw-r--r--arch/alpha/include/asm/rwsem.h (renamed from include/asm-alpha/rwsem.h)0
-rw-r--r--arch/alpha/include/asm/scatterlist.h (renamed from include/asm-alpha/scatterlist.h)0
-rw-r--r--arch/alpha/include/asm/sections.h (renamed from include/asm-alpha/sections.h)0
-rw-r--r--arch/alpha/include/asm/segment.h (renamed from include/asm-alpha/segment.h)0
-rw-r--r--arch/alpha/include/asm/sembuf.h (renamed from include/asm-alpha/sembuf.h)0
-rw-r--r--arch/alpha/include/asm/serial.h (renamed from include/asm-alpha/serial.h)0
-rw-r--r--arch/alpha/include/asm/setup.h (renamed from include/asm-alpha/setup.h)0
-rw-r--r--arch/alpha/include/asm/sfp-machine.h (renamed from include/asm-alpha/sfp-machine.h)0
-rw-r--r--arch/alpha/include/asm/shmbuf.h (renamed from include/asm-alpha/shmbuf.h)0
-rw-r--r--arch/alpha/include/asm/shmparam.h (renamed from include/asm-alpha/shmparam.h)0
-rw-r--r--arch/alpha/include/asm/sigcontext.h (renamed from include/asm-alpha/sigcontext.h)0
-rw-r--r--arch/alpha/include/asm/siginfo.h (renamed from include/asm-alpha/siginfo.h)0
-rw-r--r--arch/alpha/include/asm/signal.h (renamed from include/asm-alpha/signal.h)0
-rw-r--r--arch/alpha/include/asm/smp.h (renamed from include/asm-alpha/smp.h)0
-rw-r--r--arch/alpha/include/asm/socket.h (renamed from include/asm-alpha/socket.h)0
-rw-r--r--arch/alpha/include/asm/sockios.h (renamed from include/asm-alpha/sockios.h)0
-rw-r--r--arch/alpha/include/asm/spinlock.h (renamed from include/asm-alpha/spinlock.h)0
-rw-r--r--arch/alpha/include/asm/spinlock_types.h (renamed from include/asm-alpha/spinlock_types.h)0
-rw-r--r--arch/alpha/include/asm/stat.h (renamed from include/asm-alpha/stat.h)0
-rw-r--r--arch/alpha/include/asm/statfs.h (renamed from include/asm-alpha/statfs.h)0
-rw-r--r--arch/alpha/include/asm/string.h (renamed from include/asm-alpha/string.h)0
-rw-r--r--arch/alpha/include/asm/suspend.h (renamed from include/asm-alpha/suspend.h)0
-rw-r--r--arch/alpha/include/asm/sysinfo.h (renamed from include/asm-alpha/sysinfo.h)0
-rw-r--r--arch/alpha/include/asm/system.h (renamed from include/asm-alpha/system.h)0
-rw-r--r--arch/alpha/include/asm/termbits.h (renamed from include/asm-alpha/termbits.h)0
-rw-r--r--arch/alpha/include/asm/termios.h (renamed from include/asm-alpha/termios.h)0
-rw-r--r--arch/alpha/include/asm/thread_info.h (renamed from include/asm-alpha/thread_info.h)0
-rw-r--r--arch/alpha/include/asm/timex.h (renamed from include/asm-alpha/timex.h)0
-rw-r--r--arch/alpha/include/asm/tlb.h (renamed from include/asm-alpha/tlb.h)0
-rw-r--r--arch/alpha/include/asm/tlbflush.h (renamed from include/asm-alpha/tlbflush.h)0
-rw-r--r--arch/alpha/include/asm/topology.h (renamed from include/asm-alpha/topology.h)0
-rw-r--r--arch/alpha/include/asm/types.h (renamed from include/asm-alpha/types.h)0
-rw-r--r--arch/alpha/include/asm/uaccess.h (renamed from include/asm-alpha/uaccess.h)0
-rw-r--r--arch/alpha/include/asm/ucontext.h (renamed from include/asm-alpha/ucontext.h)0
-rw-r--r--arch/alpha/include/asm/unaligned.h (renamed from include/asm-alpha/unaligned.h)0
-rw-r--r--arch/alpha/include/asm/unistd.h (renamed from include/asm-alpha/unistd.h)0
-rw-r--r--arch/alpha/include/asm/user.h (renamed from include/asm-alpha/user.h)0
-rw-r--r--arch/alpha/include/asm/vga.h (renamed from include/asm-alpha/vga.h)0
-rw-r--r--arch/alpha/include/asm/xor.h (renamed from include/asm-alpha/xor.h)0
-rw-r--r--arch/arm/boot/compressed/.gitignore3
-rw-r--r--arch/arm/common/dmabounce.c56
-rw-r--r--arch/arm/configs/orion5x_defconfig13
-rw-r--r--arch/arm/include/asm/dma-mapping.h86
-rw-r--r--arch/arm/include/asm/kexec.h2
-rw-r--r--arch/arm/include/asm/memory.h22
-rw-r--r--arch/arm/include/asm/mtd-xip.h2
-rw-r--r--arch/arm/include/asm/processor.h4
-rw-r--r--arch/arm/include/asm/tlbflush.h7
-rw-r--r--arch/arm/include/asm/unistd.h6
-rw-r--r--arch/arm/kernel/.gitignore1
-rw-r--r--arch/arm/kernel/calls.S10
-rw-r--r--arch/arm/kernel/machine_kexec.c2
-rw-r--r--arch/arm/kernel/setup.c13
-rw-r--r--arch/arm/kernel/traps.c31
-rw-r--r--arch/arm/mach-footbridge/cats-pci.c3
-rw-r--r--arch/arm/mach-integrator/cpu.c2
-rw-r--r--arch/arm/mach-integrator/include/mach/platform.h2
-rw-r--r--arch/arm/mach-kirkwood/common.c247
-rw-r--r--arch/arm/mach-kirkwood/common.h3
-rw-r--r--arch/arm/mach-kirkwood/include/mach/kirkwood.h9
-rw-r--r--arch/arm/mach-kirkwood/irq.c2
-rw-r--r--arch/arm/mach-kirkwood/pcie.c2
-rw-r--r--arch/arm/mach-kirkwood/rd88f6192-nas-setup.c23
-rw-r--r--arch/arm/mach-kirkwood/rd88f6281-setup.c2
-rw-r--r--arch/arm/mach-lh7a40x/include/mach/ssp.h1
-rw-r--r--arch/arm/mach-lh7a40x/lcd-panel.h1
-rw-r--r--arch/arm/mach-loki/common.c4
-rw-r--r--arch/arm/mach-loki/irq.c2
-rw-r--r--arch/arm/mach-mv78xx0/common.c8
-rw-r--r--arch/arm/mach-mv78xx0/irq.c2
-rw-r--r--arch/arm/mach-mv78xx0/pcie.c2
-rw-r--r--arch/arm/mach-omap2/usb-tusb6010.c1
-rw-r--r--arch/arm/mach-orion5x/common.c115
-rw-r--r--arch/arm/mach-orion5x/common.h1
-rw-r--r--arch/arm/mach-orion5x/db88f5281-setup.c2
-rw-r--r--arch/arm/mach-orion5x/include/mach/orion5x.h5
-rw-r--r--arch/arm/mach-orion5x/irq.c2
-rw-r--r--arch/arm/mach-orion5x/kurobox_pro-setup.c3
-rw-r--r--arch/arm/mach-orion5x/mss2-setup.c1
-rw-r--r--arch/arm/mach-orion5x/mv2120-setup.c1
-rw-r--r--arch/arm/mach-orion5x/pci.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f5182-setup.c1
-rw-r--r--arch/arm/mach-orion5x/ts209-setup.c5
-rw-r--r--arch/arm/mach-orion5x/ts409-setup.c48
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c1
-rw-r--r--arch/arm/mach-pxa/include/mach/mtd-xip.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/poodle.h6
-rw-r--r--arch/arm/mach-pxa/include/mach/pxafb.h10
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-clock.h2
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-gpio.h2
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-irq.h2
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-lcd.h2
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-mem.h2
-rw-r--r--arch/arm/mach-s3c2410/mach-bast.c2
-rw-r--r--arch/arm/mach-s3c2410/mach-smdk2410.c1
-rw-r--r--arch/arm/mach-s3c2410/mach-vr1000.c2
-rw-r--r--arch/arm/mach-s3c2412/mach-jive.c3
-rw-r--r--arch/arm/mach-s3c2440/mach-anubis.c2
-rw-r--r--arch/arm/mach-s3c2440/mach-osiris.c2
-rw-r--r--arch/arm/mach-sa1100/cpu-sa1110.c2
-rw-r--r--arch/arm/mach-sa1100/include/mach/mtd-xip.h2
-rw-r--r--arch/arm/mm/cache-feroceon-l2.c2
-rw-r--r--arch/arm/mm/mmu.c50
-rw-r--r--arch/arm/mm/proc-arm940.S1
-rw-r--r--arch/arm/mm/proc-arm946.S1
-rw-r--r--arch/arm/plat-omap/include/mach/memory.h15
-rw-r--r--arch/arm/plat-orion/include/plat/cache-feroceon-l2.h (renamed from include/asm-arm/plat-orion/cache-feroceon-l2.h)2
-rw-r--r--arch/arm/plat-orion/include/plat/ehci-orion.h (renamed from include/asm-arm/plat-orion/ehci-orion.h)6
-rw-r--r--arch/arm/plat-orion/include/plat/irq.h (renamed from include/asm-arm/plat-orion/irq.h)6
-rw-r--r--arch/arm/plat-orion/include/plat/mv_xor.h (renamed from include/asm-arm/plat-orion/mv_xor.h)6
-rw-r--r--arch/arm/plat-orion/include/plat/orion_nand.h (renamed from include/asm-arm/plat-orion/orion_nand.h)6
-rw-r--r--arch/arm/plat-orion/include/plat/pcie.h (renamed from include/asm-arm/plat-orion/pcie.h)6
-rw-r--r--arch/arm/plat-orion/include/plat/time.h (renamed from include/asm-arm/plat-orion/time.h)6
-rw-r--r--arch/arm/plat-orion/irq.c2
-rw-r--r--arch/arm/plat-orion/pcie.c2
-rw-r--r--arch/arm/plat-s3c24xx/cpu.c4
-rw-r--r--arch/arm/tools/mach-types53
-rw-r--r--arch/blackfin/Kconfig69
-rw-r--r--arch/blackfin/configs/BlackStamp_defconfig1195
-rw-r--r--arch/blackfin/configs/TCM-BF537_defconfig693
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cacheinit.c4
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cacheinit.c4
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c6
-rw-r--r--arch/blackfin/kernel/setup.c33
-rw-r--r--arch/blackfin/kernel/traps.c90
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S11
-rw-r--r--arch/blackfin/lib/ins.S109
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c60
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c60
-rw-r--r--arch/blackfin/mach-bf527/head.S289
-rw-r--r--arch/blackfin/mach-bf527/ints-priority.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/Kconfig6
-rw-r--r--arch/blackfin/mach-bf533/boards/Makefile1
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c401
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c52
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c52
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c58
-rw-r--r--arch/blackfin/mach-bf533/head.S290
-rw-r--r--arch/blackfin/mach-bf533/ints-priority.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/Kconfig6
-rw-r--r--arch/blackfin/mach-bf537/boards/Makefile1
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537.c62
-rw-r--r--arch/blackfin/mach-bf537/boards/generic_board.c51
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c6
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c6
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c18
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c590
-rw-r--r--arch/blackfin/mach-bf537/head.S321
-rw-r--r--arch/blackfin/mach-bf537/ints-priority.c2
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c8
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c14
-rw-r--r--arch/blackfin/mach-bf548/head.S259
-rw-r--r--arch/blackfin/mach-bf548/ints-priority.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c8
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c52
-rw-r--r--arch/blackfin/mach-bf561/head.S275
-rw-r--r--arch/blackfin/mach-bf561/ints-priority.c2
-rw-r--r--arch/blackfin/mach-common/Makefile5
-rw-r--r--arch/blackfin/mach-common/arch_checks.c9
-rw-r--r--arch/blackfin/mach-common/cache.S115
-rw-r--r--arch/blackfin/mach-common/cacheinit.S77
-rw-r--r--arch/blackfin/mach-common/dpmc_modes.S56
-rw-r--r--arch/blackfin/mach-common/entry.S34
-rw-r--r--arch/blackfin/mach-common/head.S207
-rw-r--r--arch/blackfin/mach-common/ints-priority.c68
-rw-r--r--arch/blackfin/mach-common/lock.S45
-rw-r--r--arch/blackfin/mach-common/pm.c35
-rw-r--r--arch/blackfin/mm/blackfin_sram.c12
-rw-r--r--arch/h8300/include/asm/Kbuild (renamed from include/asm-h8300/Kbuild)0
-rw-r--r--arch/h8300/include/asm/a.out.h (renamed from include/asm-h8300/a.out.h)0
-rw-r--r--arch/h8300/include/asm/atomic.h (renamed from include/asm-h8300/atomic.h)0
-rw-r--r--arch/h8300/include/asm/auxvec.h (renamed from include/asm-h8300/auxvec.h)0
-rw-r--r--arch/h8300/include/asm/bitops.h (renamed from include/asm-h8300/bitops.h)0
-rw-r--r--arch/h8300/include/asm/bootinfo.h (renamed from include/asm-h8300/bootinfo.h)0
-rw-r--r--arch/h8300/include/asm/bug.h (renamed from include/asm-h8300/bug.h)0
-rw-r--r--arch/h8300/include/asm/bugs.h (renamed from include/asm-h8300/bugs.h)0
-rw-r--r--arch/h8300/include/asm/byteorder.h (renamed from include/asm-h8300/byteorder.h)0
-rw-r--r--arch/h8300/include/asm/cache.h (renamed from include/asm-h8300/cache.h)0
-rw-r--r--arch/h8300/include/asm/cachectl.h (renamed from include/asm-h8300/cachectl.h)0
-rw-r--r--arch/h8300/include/asm/cacheflush.h (renamed from include/asm-h8300/cacheflush.h)0
-rw-r--r--arch/h8300/include/asm/checksum.h (renamed from include/asm-h8300/checksum.h)0
-rw-r--r--arch/h8300/include/asm/cputime.h (renamed from include/asm-h8300/cputime.h)0
-rw-r--r--arch/h8300/include/asm/current.h (renamed from include/asm-h8300/current.h)0
-rw-r--r--arch/h8300/include/asm/dbg.h (renamed from include/asm-h8300/dbg.h)0
-rw-r--r--arch/h8300/include/asm/delay.h (renamed from include/asm-h8300/delay.h)0
-rw-r--r--arch/h8300/include/asm/device.h (renamed from include/asm-h8300/device.h)0
-rw-r--r--arch/h8300/include/asm/div64.h (renamed from include/asm-h8300/div64.h)0
-rw-r--r--arch/h8300/include/asm/dma.h (renamed from include/asm-h8300/dma.h)0
-rw-r--r--arch/h8300/include/asm/elf.h (renamed from include/asm-h8300/elf.h)0
-rw-r--r--arch/h8300/include/asm/emergency-restart.h (renamed from include/asm-h8300/emergency-restart.h)0
-rw-r--r--arch/h8300/include/asm/errno.h (renamed from include/asm-h8300/errno.h)0
-rw-r--r--arch/h8300/include/asm/fb.h (renamed from include/asm-h8300/fb.h)0
-rw-r--r--arch/h8300/include/asm/fcntl.h (renamed from include/asm-h8300/fcntl.h)0
-rw-r--r--arch/h8300/include/asm/flat.h (renamed from include/asm-h8300/flat.h)0
-rw-r--r--arch/h8300/include/asm/fpu.h (renamed from include/asm-h8300/fpu.h)0
-rw-r--r--arch/h8300/include/asm/futex.h (renamed from include/asm-h8300/futex.h)0
-rw-r--r--arch/h8300/include/asm/gpio.h (renamed from include/asm-h8300/gpio.h)0
-rw-r--r--arch/h8300/include/asm/hardirq.h (renamed from include/asm-h8300/hardirq.h)0
-rw-r--r--arch/h8300/include/asm/hw_irq.h (renamed from include/asm-h8300/hw_irq.h)0
-rw-r--r--arch/h8300/include/asm/io.h (renamed from include/asm-h8300/io.h)0
-rw-r--r--arch/h8300/include/asm/ioctl.h (renamed from include/asm-h8300/ioctl.h)0
-rw-r--r--arch/h8300/include/asm/ioctls.h (renamed from include/asm-h8300/ioctls.h)0
-rw-r--r--arch/h8300/include/asm/ipcbuf.h (renamed from include/asm-h8300/ipcbuf.h)0
-rw-r--r--arch/h8300/include/asm/irq.h (renamed from include/asm-h8300/irq.h)0
-rw-r--r--arch/h8300/include/asm/irq_regs.h (renamed from include/asm-h8300/irq_regs.h)0
-rw-r--r--arch/h8300/include/asm/kdebug.h (renamed from include/asm-h8300/kdebug.h)0
-rw-r--r--arch/h8300/include/asm/kmap_types.h (renamed from include/asm-h8300/kmap_types.h)0
-rw-r--r--arch/h8300/include/asm/linkage.h (renamed from include/asm-h8300/linkage.h)0
-rw-r--r--arch/h8300/include/asm/local.h (renamed from include/asm-h8300/local.h)0
-rw-r--r--arch/h8300/include/asm/mc146818rtc.h (renamed from include/asm-h8300/mc146818rtc.h)0
-rw-r--r--arch/h8300/include/asm/md.h (renamed from include/asm-h8300/md.h)0
-rw-r--r--arch/h8300/include/asm/mman.h (renamed from include/asm-h8300/mman.h)0
-rw-r--r--arch/h8300/include/asm/mmu.h (renamed from include/asm-h8300/mmu.h)0
-rw-r--r--arch/h8300/include/asm/mmu_context.h (renamed from include/asm-h8300/mmu_context.h)0
-rw-r--r--arch/h8300/include/asm/module.h (renamed from include/asm-h8300/module.h)0
-rw-r--r--arch/h8300/include/asm/msgbuf.h (renamed from include/asm-h8300/msgbuf.h)0
-rw-r--r--arch/h8300/include/asm/mutex.h (renamed from include/asm-h8300/mutex.h)0
-rw-r--r--arch/h8300/include/asm/page.h (renamed from include/asm-h8300/page.h)0
-rw-r--r--arch/h8300/include/asm/page_offset.h (renamed from include/asm-h8300/page_offset.h)0
-rw-r--r--arch/h8300/include/asm/param.h (renamed from include/asm-h8300/param.h)0
-rw-r--r--arch/h8300/include/asm/pci.h (renamed from include/asm-h8300/pci.h)0
-rw-r--r--arch/h8300/include/asm/percpu.h (renamed from include/asm-h8300/percpu.h)0
-rw-r--r--arch/h8300/include/asm/pgalloc.h (renamed from include/asm-h8300/pgalloc.h)0
-rw-r--r--arch/h8300/include/asm/pgtable.h (renamed from include/asm-h8300/pgtable.h)0
-rw-r--r--arch/h8300/include/asm/poll.h (renamed from include/asm-h8300/poll.h)0
-rw-r--r--arch/h8300/include/asm/posix_types.h (renamed from include/asm-h8300/posix_types.h)0
-rw-r--r--arch/h8300/include/asm/processor.h (renamed from include/asm-h8300/processor.h)0
-rw-r--r--arch/h8300/include/asm/ptrace.h (renamed from include/asm-h8300/ptrace.h)0
-rw-r--r--arch/h8300/include/asm/regs267x.h (renamed from include/asm-h8300/regs267x.h)0
-rw-r--r--arch/h8300/include/asm/regs306x.h (renamed from include/asm-h8300/regs306x.h)0
-rw-r--r--arch/h8300/include/asm/resource.h (renamed from include/asm-h8300/resource.h)0
-rw-r--r--arch/h8300/include/asm/scatterlist.h (renamed from include/asm-h8300/scatterlist.h)0
-rw-r--r--arch/h8300/include/asm/sections.h (renamed from include/asm-h8300/sections.h)0
-rw-r--r--arch/h8300/include/asm/segment.h (renamed from include/asm-h8300/segment.h)0
-rw-r--r--arch/h8300/include/asm/sembuf.h (renamed from include/asm-h8300/sembuf.h)0
-rw-r--r--arch/h8300/include/asm/setup.h (renamed from include/asm-h8300/setup.h)0
-rw-r--r--arch/h8300/include/asm/sh_bios.h (renamed from include/asm-h8300/sh_bios.h)0
-rw-r--r--arch/h8300/include/asm/shm.h (renamed from include/asm-h8300/shm.h)0
-rw-r--r--arch/h8300/include/asm/shmbuf.h (renamed from include/asm-h8300/shmbuf.h)0
-rw-r--r--arch/h8300/include/asm/shmparam.h (renamed from include/asm-h8300/shmparam.h)0
-rw-r--r--arch/h8300/include/asm/sigcontext.h (renamed from include/asm-h8300/sigcontext.h)0
-rw-r--r--arch/h8300/include/asm/siginfo.h (renamed from include/asm-h8300/siginfo.h)0
-rw-r--r--arch/h8300/include/asm/signal.h (renamed from include/asm-h8300/signal.h)0
-rw-r--r--arch/h8300/include/asm/smp.h (renamed from include/asm-h8300/smp.h)0
-rw-r--r--arch/h8300/include/asm/socket.h (renamed from include/asm-h8300/socket.h)0
-rw-r--r--arch/h8300/include/asm/sockios.h (renamed from include/asm-h8300/sockios.h)0
-rw-r--r--arch/h8300/include/asm/spinlock.h (renamed from include/asm-h8300/spinlock.h)0
-rw-r--r--arch/h8300/include/asm/stat.h (renamed from include/asm-h8300/stat.h)0
-rw-r--r--arch/h8300/include/asm/statfs.h (renamed from include/asm-h8300/statfs.h)0
-rw-r--r--arch/h8300/include/asm/string.h (renamed from include/asm-h8300/string.h)0
-rw-r--r--arch/h8300/include/asm/system.h (renamed from include/asm-h8300/system.h)0
-rw-r--r--arch/h8300/include/asm/target_time.h (renamed from include/asm-h8300/target_time.h)0
-rw-r--r--arch/h8300/include/asm/termbits.h (renamed from include/asm-h8300/termbits.h)0
-rw-r--r--arch/h8300/include/asm/termios.h (renamed from include/asm-h8300/termios.h)0
-rw-r--r--arch/h8300/include/asm/thread_info.h (renamed from include/asm-h8300/thread_info.h)0
-rw-r--r--arch/h8300/include/asm/timex.h (renamed from include/asm-h8300/timex.h)0
-rw-r--r--arch/h8300/include/asm/tlb.h (renamed from include/asm-h8300/tlb.h)0
-rw-r--r--arch/h8300/include/asm/tlbflush.h (renamed from include/asm-h8300/tlbflush.h)0
-rw-r--r--arch/h8300/include/asm/topology.h (renamed from include/asm-h8300/topology.h)0
-rw-r--r--arch/h8300/include/asm/traps.h (renamed from include/asm-h8300/traps.h)0
-rw-r--r--arch/h8300/include/asm/types.h (renamed from include/asm-h8300/types.h)0
-rw-r--r--arch/h8300/include/asm/uaccess.h (renamed from include/asm-h8300/uaccess.h)0
-rw-r--r--arch/h8300/include/asm/ucontext.h (renamed from include/asm-h8300/ucontext.h)0
-rw-r--r--arch/h8300/include/asm/unaligned.h (renamed from include/asm-h8300/unaligned.h)0
-rw-r--r--arch/h8300/include/asm/unistd.h (renamed from include/asm-h8300/unistd.h)0
-rw-r--r--arch/h8300/include/asm/user.h (renamed from include/asm-h8300/user.h)0
-rw-r--r--arch/h8300/include/asm/virtconvert.h (renamed from include/asm-h8300/virtconvert.h)0
-rw-r--r--arch/h8300/mm/init.c9
-rw-r--r--arch/ia64/include/asm/kexec.h2
-rw-r--r--arch/ia64/include/asm/sal.h2
-rw-r--r--arch/ia64/kernel/head.S26
-rw-r--r--arch/ia64/kernel/setup.c18
-rw-r--r--arch/ia64/kernel/smpboot.c2
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S3
-rw-r--r--arch/ia64/mm/contig.c10
-rw-r--r--arch/ia64/mm/discontig.c6
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/include/asm/hvcall.h22
-rw-r--r--arch/powerpc/include/asm/irqflags.h5
-rw-r--r--arch/powerpc/include/asm/kexec.h2
-rw-r--r--arch/powerpc/include/asm/mmu_context.h1
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/kernel/head_32.S2
-rw-r--r--arch/powerpc/kernel/lparcfg.c5
-rw-r--r--arch/powerpc/kernel/machine_kexec_32.c2
-rw-r--r--arch/powerpc/kernel/misc_32.S2
-rw-r--r--arch/powerpc/kernel/module.c15
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c57
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h27
-rw-r--r--arch/powerpc/platforms/pseries/setup.c28
-rw-r--r--arch/s390/include/asm/kexec.h2
-rw-r--r--arch/sh/include/asm/kexec.h2
-rw-r--r--arch/sparc/include/asm/irq_64.h4
-rw-r--r--arch/sparc/include/asm/of_device.h3
-rw-r--r--arch/sparc64/kernel/irq.c52
-rw-r--r--arch/sparc64/kernel/kstack.h60
-rw-r--r--arch/sparc64/kernel/process.c27
-rw-r--r--arch/sparc64/kernel/smp.c4
-rw-r--r--arch/sparc64/kernel/stacktrace.c13
-rw-r--r--arch/sparc64/kernel/traps.c7
-rw-r--r--arch/sparc64/lib/mcount.S39
-rw-r--r--arch/sparc64/mm/init.c41
-rw-r--r--arch/sparc64/mm/ultra.S2
-rw-r--r--arch/x86/Kconfig7
-rw-r--r--arch/x86/boot/boot.h10
-rw-r--r--arch/x86/boot/cpu.c3
-rw-r--r--arch/x86/boot/cpucheck.c10
-rw-r--r--arch/x86/boot/main.c5
-rw-r--r--arch/x86/boot/memory.c1
-rw-r--r--arch/x86/kernel/acpi/boot.c16
-rw-r--r--arch/x86/kernel/acpi/sleep.c4
-rw-r--r--arch/x86/kernel/amd_iommu.c21
-rw-r--r--arch/x86/kernel/amd_iommu_init.c24
-rw-r--r--arch/x86/kernel/apic_32.c22
-rw-r--r--arch/x86/kernel/apic_64.c7
-rw-r--r--arch/x86/kernel/cpu/addon_cpuid_features.c17
-rw-r--r--arch/x86/kernel/cpu/bugs.c6
-rw-r--r--arch/x86/kernel/cpu/cyrix.c18
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c15
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c5
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c8
-rw-r--r--arch/x86/kernel/efi_32.c4
-rw-r--r--arch/x86/kernel/genx2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/head64.c1
-rw-r--r--arch/x86/kernel/hpet.c24
-rw-r--r--arch/x86/kernel/io_apic_32.c6
-rw-r--r--arch/x86/kernel/io_apic_64.c25
-rw-r--r--arch/x86/kernel/machine_kexec_32.c20
-rw-r--r--arch/x86/kernel/mfgpt_32.c52
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c2
-rw-r--r--arch/x86/kernel/mpparse.c17
-rw-r--r--arch/x86/kernel/msr.c2
-rw-r--r--arch/x86/kernel/nmi.c28
-rw-r--r--arch/x86/kernel/numaq_32.c2
-rw-r--r--arch/x86/kernel/paravirt.c2
-rw-r--r--arch/x86/kernel/pci-calgary_64.c16
-rw-r--r--arch/x86/kernel/process_32.c5
-rw-r--r--arch/x86/kernel/process_64.c5
-rw-r--r--arch/x86/kernel/relocate_kernel_32.S10
-rw-r--r--arch/x86/kernel/setup.c24
-rw-r--r--arch/x86/kernel/signal_64.c11
-rw-r--r--arch/x86/kernel/smpboot.c65
-rw-r--r--arch/x86/kernel/smpcommon.c17
-rw-r--r--arch/x86/kernel/tlb_uv.c3
-rw-r--r--arch/x86/kernel/traps_64.c9
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kernel/tsc_sync.c6
-rw-r--r--arch/x86/kernel/visws_quirks.c6
-rw-r--r--arch/x86/kernel/vmi_32.c3
-rw-r--r--arch/x86/kernel/vmlinux_32.lds.S8
-rw-r--r--arch/x86/mm/Makefile3
-rw-r--r--arch/x86/mm/init_64.c12
-rw-r--r--arch/x86/mm/ioremap.c10
-rw-r--r--arch/x86/mm/mmio-mod.c4
-rw-r--r--arch/x86/mm/pageattr-test.c3
-rw-r--r--arch/x86/mm/pageattr.c27
-rw-r--r--arch/x86/mm/pat.c50
-rw-r--r--arch/x86/mm/pgtable.c3
-rw-r--r--arch/x86/mm/srat_32.c12
-rw-r--r--arch/x86/oprofile/nmi_int.c39
-rw-r--r--arch/x86/pci/mmconfig-shared.c2
-rw-r--r--arch/x86/power/cpu_32.c6
-rw-r--r--arch/x86/power/hibernate_asm_32.S26
-rw-r--r--crypto/digest.c2
-rw-r--r--crypto/tcrypt.c28
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/dock.c11
-rw-r--r--drivers/acpi/ec.c36
-rw-r--r--drivers/acpi/executer/exconfig.c3
-rw-r--r--drivers/acpi/namespace/nsnames.c34
-rw-r--r--drivers/acpi/pci_link.c12
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/processor_idle.c1
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/resources/rscalc.c3
-rw-r--r--drivers/acpi/utilities/utalloc.c8
-rw-r--r--drivers/acpi/utilities/utdelete.c13
-rw-r--r--drivers/acpi/utilities/utobject.c13
-rw-r--r--drivers/acpi/wmi.c2
-rw-r--r--drivers/char/agp/agp.h3
-rw-r--r--drivers/char/agp/ali-agp.c10
-rw-r--r--drivers/char/agp/amd-k7-agp.c10
-rw-r--r--drivers/char/agp/amd64-agp.c51
-rw-r--r--drivers/char/agp/ati-agp.c7
-rw-r--r--drivers/char/agp/backend.c28
-rw-r--r--drivers/char/agp/generic.c41
-rw-r--r--drivers/char/agp/intel-agp.c83
-rw-r--r--drivers/char/agp/isoch.c37
-rw-r--r--drivers/char/agp/sis-agp.c17
-rw-r--r--drivers/char/agp/sworks-agp.c25
-rw-r--r--drivers/char/agp/uninorth-agp.c32
-rw-r--r--drivers/char/hvc_console.c5
-rw-r--r--drivers/char/hw_random/via-rng.c8
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c1
-rw-r--r--drivers/char/rtc.c1
-rw-r--r--drivers/char/synclink_gt.c1
-rw-r--r--drivers/char/tty_io.c72
-rw-r--r--drivers/char/vt.c82
-rw-r--r--drivers/char/vt_ioctl.c4
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c1
-rw-r--r--drivers/cpuidle/governors/ladder.c26
-rw-r--r--drivers/cpuidle/governors/menu.c42
-rw-r--r--drivers/cpuidle/sysfs.c29
-rw-r--r--drivers/crypto/padlock-aes.c28
-rw-r--r--drivers/crypto/padlock-sha.c9
-rw-r--r--drivers/crypto/talitos.c54
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/firmware/memmap.c61
-rw-r--r--drivers/hid/usbhid/hid-quirks.c12
-rw-r--r--drivers/hwmon/Kconfig16
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/abituguru3.c134
-rw-r--r--drivers/hwmon/adcxx.c329
-rw-r--r--drivers/hwmon/applesmc.c20
-rw-r--r--drivers/hwmon/coretemp.c5
-rw-r--r--drivers/hwmon/hwmon-vid.c36
-rw-r--r--drivers/hwmon/i5k_amb.c28
-rw-r--r--drivers/hwmon/ibmaem.c27
-rw-r--r--drivers/hwmon/w83791d.c3
-rw-r--r--drivers/i2c/chips/isp1301_omap.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c48
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c60
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c17
-rw-r--r--drivers/input/evdev.c63
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/keyboard/gpio_keys.c3
-rw-r--r--drivers/input/mouse/Kconfig23
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/bcm5974.c681
-rw-r--r--drivers/input/serio/i8042-sparcio.h3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/serio/xilinx_ps2.c4
-rw-r--r--drivers/input/touchscreen/Kconfig21
-rw-r--r--drivers/lguest/page_tables.c25
-rw-r--r--drivers/md/md.c33
-rw-r--r--drivers/md/raid10.c9
-rw-r--r--drivers/md/raid5.c32
-rw-r--r--drivers/misc/acer-wmi.c19
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
-rw-r--r--drivers/mmc/host/s3cmci.c17
-rw-r--r--drivers/mmc/host/sdricoh_cs.c1
-rw-r--r--drivers/mtd/nand/orion_nand.c2
-rw-r--r--drivers/net/bnx2x.h87
-rw-r--r--drivers/net/bnx2x_fw_defs.h160
-rw-r--r--drivers/net/bnx2x_hsi.h16
-rw-r--r--drivers/net/bnx2x_init.h26
-rw-r--r--drivers/net/bnx2x_init_values.h533
-rw-r--r--drivers/net/bnx2x_link.c1258
-rw-r--r--drivers/net/bnx2x_link.h11
-rw-r--r--drivers/net/bnx2x_main.c1212
-rw-r--r--drivers/net/bnx2x_reg.h210
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c49
-rw-r--r--drivers/rtc/rtc-dev.c12
-rw-r--r--drivers/rtc/rtc-isl1208.c2
-rw-r--r--drivers/sbus/sbus.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/serial/Kconfig1
-rw-r--r--drivers/serial/sunhv.c2
-rw-r--r--drivers/serial/sunsab.c2
-rw-r--r--drivers/serial/sunsu.c2
-rw-r--r--drivers/serial/sunzilog.c2
-rw-r--r--drivers/spi/spi.c40
-rw-r--r--drivers/usb/Kconfig6
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/class/cdc-acm.c86
-rw-r--r--drivers/usb/class/cdc-acm.h3
-rw-r--r--drivers/usb/core/driver.c5
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/gadget/Kconfig10
-rw-r--r--drivers/usb/gadget/dummy_hcd.c5
-rw-r--r--drivers/usb/gadget/f_acm.c196
-rw-r--r--drivers/usb/gadget/f_ecm.c2
-rw-r--r--drivers/usb/gadget/f_rndis.c2
-rw-r--r--drivers/usb/gadget/f_serial.c2
-rw-r--r--drivers/usb/gadget/f_subset.c2
-rw-r--r--drivers/usb/gadget/gadget_chips.h6
-rw-r--r--drivers/usb/gadget/omap_udc.c5
-rw-r--r--drivers/usb/gadget/u_serial.c290
-rw-r--r--drivers/usb/gadget/u_serial.h12
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/isp1760-hcd.c53
-rw-r--r--drivers/usb/host/isp1760-hcd.h5
-rw-r--r--drivers/usb/host/ohci-hcd.c23
-rw-r--r--drivers/usb/host/ohci-hub.c11
-rw-r--r--drivers/usb/host/ohci-omap.c3
-rw-r--r--drivers/usb/host/ohci-pci.c132
-rw-r--r--drivers/usb/host/ohci-q.c6
-rw-r--r--drivers/usb/host/ohci.h11
-rw-r--r--drivers/usb/host/r8a66597-hcd.c49
-rw-r--r--drivers/usb/misc/Kconfig10
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/auerswald.c2152
-rw-r--r--drivers/usb/misc/isight_firmware.c4
-rw-r--r--drivers/usb/musb/Kconfig176
-rw-r--r--drivers/usb/musb/Makefile86
-rw-r--r--drivers/usb/musb/cppi_dma.c1540
-rw-r--r--drivers/usb/musb/cppi_dma.h133
-rw-r--r--drivers/usb/musb/davinci.c462
-rw-r--r--drivers/usb/musb/davinci.h100
-rw-r--r--drivers/usb/musb/musb_core.c2261
-rw-r--r--drivers/usb/musb/musb_core.h507
-rw-r--r--drivers/usb/musb/musb_debug.h66
-rw-r--r--drivers/usb/musb/musb_dma.h172
-rw-r--r--drivers/usb/musb/musb_gadget.c2031
-rw-r--r--drivers/usb/musb/musb_gadget.h108
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c981
-rw-r--r--drivers/usb/musb/musb_host.c2170
-rw-r--r--drivers/usb/musb/musb_host.h110
-rw-r--r--drivers/usb/musb/musb_io.h115
-rw-r--r--drivers/usb/musb/musb_procfs.c830
-rw-r--r--drivers/usb/musb/musb_regs.h300
-rw-r--r--drivers/usb/musb/musb_virthub.c425
-rw-r--r--drivers/usb/musb/musbhsdma.c433
-rw-r--r--drivers/usb/musb/omap2430.c324
-rw-r--r--drivers/usb/musb/omap2430.h56
-rw-r--r--drivers/usb/musb/tusb6010.c1151
-rw-r--r--drivers/usb/musb/tusb6010.h233
-rw-r--r--drivers/usb/musb/tusb6010_omap.c719
-rw-r--r--drivers/usb/serial/Kconfig7
-rw-r--r--drivers/usb/serial/ftdi_sio.c6
-rw-r--r--drivers/usb/serial/ftdi_sio.h7
-rw-r--r--drivers/usb/serial/option.c44
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/sierra.c170
-rw-r--r--drivers/usb/serial/usb-serial.c7
-rw-r--r--drivers/usb/storage/Kconfig12
-rw-r--r--drivers/usb/storage/Makefile1
-rw-r--r--drivers/usb/storage/sierra_ms.c207
-rw-r--r--drivers/usb/storage/sierra_ms.h4
-rw-r--r--drivers/usb/storage/transport.c17
-rw-r--r--drivers/usb/storage/unusual_devs.h40
-rw-r--r--drivers/usb/storage/usb.c3
-rw-r--r--drivers/video/atmel_lcdfb.c13
-rw-r--r--drivers/video/aty/radeon_accel.c8
-rw-r--r--drivers/video/console/fbcon.c4
-rw-r--r--drivers/video/fsl-diu-fb.c32
-rw-r--r--drivers/video/matrox/i2c-matroxfb.c21
-rw-r--r--drivers/video/matrox/matroxfb_maven.c97
-rw-r--r--drivers/video/pxafb.c68
-rw-r--r--drivers/watchdog/Kconfig5
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/inode.c1
-rw-r--r--fs/dlm/config.c203
-rw-r--r--fs/dlm/user.c10
-rw-r--r--fs/eventpoll.c5
-rw-r--r--fs/inode.c1
-rw-r--r--fs/jbd/transaction.c4
-rw-r--r--fs/jbd2/transaction.c4
-rw-r--r--fs/lockd/svc4proc.c4
-rw-r--r--fs/lockd/svcproc.c4
-rw-r--r--fs/nfsd/export.c6
-rw-r--r--fs/omfs/bitmap.c5
-rw-r--r--fs/omfs/file.c33
-rw-r--r--fs/omfs/inode.c3
-rw-r--r--fs/reiserfs/super.c1
-rw-r--r--fs/seq_file.c14
-rw-r--r--fs/ubifs/budget.c33
-rw-r--r--fs/ubifs/commit.c3
-rw-r--r--fs/ubifs/debug.c27
-rw-r--r--fs/ubifs/debug.h143
-rw-r--r--fs/ubifs/dir.c24
-rw-r--r--fs/ubifs/file.c8
-rw-r--r--fs/ubifs/find.c9
-rw-r--r--fs/ubifs/io.c14
-rw-r--r--fs/ubifs/journal.c110
-rw-r--r--fs/ubifs/log.c4
-rw-r--r--fs/ubifs/misc.h16
-rw-r--r--fs/ubifs/orphan.c4
-rw-r--r--fs/ubifs/super.c48
-rw-r--r--fs/ubifs/tnc_commit.c37
-rw-r--r--fs/ubifs/ubifs-media.h4
-rw-r--r--fs/ubifs/ubifs.h33
-rw-r--r--fs/ubifs/xattr.c54
-rw-r--r--fs/xfs/linux-2.6/sema.h52
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c16
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h4
-rw-r--r--fs/xfs/linux-2.6/xfs_export.c10
-rw-r--r--fs/xfs/linux-2.6/xfs_fs_subr.c6
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c192
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.h15
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h6
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c6
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c189
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h3
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.c22
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h65
-rw-r--r--fs/xfs/quota/xfs_dquot.c38
-rw-r--r--fs/xfs/quota/xfs_dquot.h29
-rw-r--r--fs/xfs/quota/xfs_dquot_item.c8
-rw-r--r--fs/xfs/quota/xfs_qm.c14
-rw-r--r--fs/xfs/quota/xfs_qm.h2
-rw-r--r--fs/xfs/quota/xfs_qm_bhv.c7
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c4
-rw-r--r--fs/xfs/xfs_acl.c52
-rw-r--r--fs/xfs/xfs_acl.h14
-rw-r--r--fs/xfs/xfs_arch.h68
-rw-r--r--fs/xfs/xfs_attr.c110
-rw-r--r--fs/xfs/xfs_attr.h1
-rw-r--r--fs/xfs/xfs_attr_leaf.c75
-rw-r--r--fs/xfs/xfs_attr_leaf.h2
-rw-r--r--fs/xfs/xfs_bit.c103
-rw-r--r--fs/xfs/xfs_bit.h34
-rw-r--r--fs/xfs/xfs_bmap.c34
-rw-r--r--fs/xfs/xfs_btree.c105
-rw-r--r--fs/xfs/xfs_btree.h8
-rw-r--r--fs/xfs/xfs_buf_item.c4
-rw-r--r--fs/xfs/xfs_dfrag.c33
-rw-r--r--fs/xfs/xfs_error.c5
-rw-r--r--fs/xfs/xfs_error.h12
-rw-r--r--fs/xfs/xfs_filestream.c2
-rw-r--r--fs/xfs/xfs_ialloc_btree.c30
-rw-r--r--fs/xfs/xfs_iget.c48
-rw-r--r--fs/xfs/xfs_inode.c70
-rw-r--r--fs/xfs/xfs_inode.h46
-rw-r--r--fs/xfs/xfs_inode_item.c11
-rw-r--r--fs/xfs/xfs_itable.c4
-rw-r--r--fs/xfs/xfs_log.c86
-rw-r--r--fs/xfs/xfs_log.h2
-rw-r--r--fs/xfs/xfs_log_priv.h14
-rw-r--r--fs/xfs/xfs_log_recover.c7
-rw-r--r--fs/xfs/xfs_mount.c82
-rw-r--r--fs/xfs/xfs_mount.h17
-rw-r--r--fs/xfs/xfs_rtalloc.c19
-rw-r--r--fs/xfs/xfs_rw.c2
-rw-r--r--fs/xfs/xfs_trans.c75
-rw-r--r--fs/xfs/xfs_trans.h12
-rw-r--r--fs/xfs/xfs_trans_buf.c12
-rw-r--r--fs/xfs/xfs_trans_item.c66
-rw-r--r--fs/xfs/xfs_utils.c4
-rw-r--r--fs/xfs/xfs_utils.h3
-rw-r--r--fs/xfs/xfs_vfsops.c13
-rw-r--r--fs/xfs/xfs_vnodeops.c198
-rw-r--r--include/acpi/acnamesp.h2
-rw-r--r--include/asm-arm/plat-s3c/regs-nand.h2
-rw-r--r--include/asm-arm/plat-s3c/regs-timer.h2
-rw-r--r--include/asm-arm/plat-s3c/regs-watchdog.h2
-rw-r--r--include/asm-arm/plat-s3c24xx/s3c2410.h4
-rw-r--r--include/asm-blackfin/Kbuild2
-rw-r--r--include/asm-blackfin/bfin-global.h38
-rw-r--r--include/asm-blackfin/dpmc.h1
-rw-r--r--include/asm-blackfin/fixed_code.h24
-rw-r--r--include/asm-blackfin/mach-bf527/mem_map.h5
-rw-r--r--include/asm-blackfin/mach-bf533/mem_init.h2
-rw-r--r--include/asm-blackfin/mach-bf533/mem_map.h5
-rw-r--r--include/asm-blackfin/mach-bf537/mem_map.h5
-rw-r--r--include/asm-blackfin/mach-common/cdef_LPBlackfin.h8
-rw-r--r--include/asm-blackfin/unistd.h8
-rw-r--r--include/asm-generic/ioctl.h4
-rw-r--r--include/asm-mips/kexec.h2
-rw-r--r--include/asm-x86/amd_iommu_types.h8
-rw-r--r--include/asm-x86/atomic_64.h8
-rw-r--r--include/asm-x86/efi.h2
-rw-r--r--include/asm-x86/geode.h3
-rw-r--r--include/asm-x86/hw_irq.h12
-rw-r--r--include/asm-x86/i387.h34
-rw-r--r--include/asm-x86/io.h18
-rw-r--r--include/asm-x86/irq_vectors.h11
-rw-r--r--include/asm-x86/kexec.h8
-rw-r--r--include/asm-x86/mman.h1
-rw-r--r--include/asm-x86/mmconfig.h2
-rw-r--r--include/asm-x86/mmzone_32.h6
-rw-r--r--include/asm-x86/percpu.h2
-rw-r--r--include/asm-x86/pgtable_64.h2
-rw-r--r--include/asm-x86/processor.h23
-rw-r--r--include/asm-x86/spinlock.h4
-rw-r--r--include/asm-x86/uv/uv_bau.h5
-rw-r--r--include/crypto/hash.h18
-rw-r--r--include/linux/Kbuild3
-rw-r--r--include/linux/agp_backend.h5
-rw-r--r--include/linux/bitmap.h1
-rw-r--r--include/linux/bootmem.h4
-rw-r--r--include/linux/byteorder.h372
-rw-r--r--include/linux/capability.h15
-rw-r--r--include/linux/completion.h3
-rw-r--r--include/linux/cred.h50
-rw-r--r--include/linux/firmware-map.h26
-rw-r--r--include/linux/ftrace.h21
-rw-r--r--include/linux/i2c-id.h2
-rw-r--r--include/linux/init.h1
-rw-r--r--include/linux/ivtv.h6
-rw-r--r--include/linux/ivtvfb.h6
-rw-r--r--include/linux/kernel.h14
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/lockdep.h70
-rw-r--r--include/linux/mm.h22
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/rcuclassic.h2
-rw-r--r--include/linux/reboot.h1
-rw-r--r--include/linux/sched.h32
-rw-r--r--include/linux/security.h39
-rw-r--r--include/linux/seq_file.h12
-rw-r--r--include/linux/skbuff.h6
-rw-r--r--include/linux/spinlock.h6
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/suspend.h4
-rw-r--r--include/linux/swab.h309
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_driver.h14
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/musb.h98
-rw-r--r--include/linux/usb/serial.h3
-rw-r--r--include/linux/videodev2.h4
-rw-r--r--include/linux/vmalloc.h4
-rw-r--r--include/linux/vt_kern.h1
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ip_vs.h32
-rw-r--r--include/net/pkt_sched.h5
-rw-r--r--include/video/atmel_lcdc.h1
-rw-r--r--include/video/radeon.h4
-rw-r--r--init/Kconfig11
-rw-r--r--init/main.c6
-rw-r--r--kernel/Kconfig.hz2
-rw-r--r--kernel/capability.c21
-rw-r--r--kernel/cpu.c7
-rw-r--r--kernel/irq/proc.c96
-rw-r--r--kernel/kexec.c66
-rw-r--r--kernel/lockdep.c303
-rw-r--r--kernel/lockdep_internals.h19
-rw-r--r--kernel/lockdep_proc.c45
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/posix-timers.c19
-rw-r--r--kernel/ptrace.c5
-rw-r--r--kernel/sched.c77
-rw-r--r--kernel/sched_clock.c178
-rw-r--r--kernel/sched_fair.c21
-rw-r--r--kernel/sched_rt.c10
-rw-r--r--kernel/signal.c1
-rw-r--r--kernel/smp.c58
-rw-r--r--kernel/spinlock.c12
-rw-r--r--kernel/stop_machine.c1
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/workqueue.c24
-rw-r--r--lib/Kconfig.debug17
-rw-r--r--lib/bitmap.c11
-rw-r--r--lib/debug_locks.c2
-rw-r--r--lib/lmb.c2
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/Kconfig3
-rw-r--r--mm/bootmem.c2
-rw-r--r--mm/hugetlb.c62
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/mempolicy.c1
-rw-r--r--mm/mmap.c20
-rw-r--r--mm/oom_kill.c6
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/sparse.c1
-rw-r--r--mm/util.c15
-rw-r--r--net/core/gen_estimator.c9
-rw-r--r--net/core/pktgen.c29
-rw-r--r--net/dccp/proto.c5
-rw-r--r--net/ipv4/igmp.c71
-rw-r--r--net/ipv4/ipvs/ip_vs_app.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_conn.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c27
-rw-r--r--net/ipv4/ipvs/ip_vs_dh.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_est.c116
-rw-r--r--net/ipv4/ipvs/ip_vs_lblc.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_lblcr.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_lc.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_nq.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_proto.c4
-rw-r--r--net/ipv4/ipvs/ip_vs_rr.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sched.c4
-rw-r--r--net/ipv4/ipvs/ip_vs_sed.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sh.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sync.c4
-rw-r--r--net/ipv4/ipvs/ip_vs_wlc.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_wrr.c2
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/route.c3
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/rxrpc/ar-accept.c2
-rw-r--r--net/sched/act_api.c13
-rw-r--r--net/sched/sch_api.c50
-rw-r--r--net/sched/sch_generic.c11
-rw-r--r--net/sched/sch_htb.c3
-rw-r--r--net/tipc/subscr.c2
-rw-r--r--net/wireless/wext.c1
-rw-r--r--net/xfrm/xfrm_output.c5
-rw-r--r--security/capability.c3
-rw-r--r--security/commoncap.c24
-rw-r--r--security/root_plug.c3
-rw-r--r--security/security.c10
-rw-r--r--security/selinux/hooks.c25
-rw-r--r--security/smack/smack_lsm.c49
-rw-r--r--sound/pci/Kconfig2
-rw-r--r--sound/pci/hda/hda_intel.c4
-rw-r--r--sound/pci/hda/patch_realtek.c45
-rw-r--r--sound/pci/oxygen/virtuoso.c73
-rw-r--r--sound/soc/codecs/wm8750.c3
-rw-r--r--sound/soc/codecs/wm8990.c10
-rw-r--r--sound/soc/codecs/wm8990.h14
-rw-r--r--sound/soc/pxa/spitz.c2
927 files changed, 30246 insertions, 10215 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 1d1b34500b69..1615350b7b53 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -102,6 +102,13 @@ C-procfs-example = procfs_example.xml
C-procfs-example2 = $(addprefix $(obj)/,$(C-procfs-example))
$(obj)/procfs-guide.xml: $(C-procfs-example2)
+# List of programs to build
+##oops, this is a kernel module::hostprogs-y := procfs_example
+obj-m += procfs_example.o
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
notfoundtemplate = echo "*** You have to install docbook-utils or xmlto ***"; \
exit 1
db2xtemplate = db2TYPE -o $(dir $@) $<
diff --git a/Documentation/DocBook/procfs_example.c b/Documentation/DocBook/procfs_example.c
index 7064084c1c5e..2f3de0fb8365 100644
--- a/Documentation/DocBook/procfs_example.c
+++ b/Documentation/DocBook/procfs_example.c
@@ -189,8 +189,6 @@ static int __init init_procfs_example(void)
return 0;
no_symlink:
- remove_proc_entry("tty", example_dir);
-no_tty:
remove_proc_entry("bar", example_dir);
no_bar:
remove_proc_entry("foo", example_dir);
@@ -206,7 +204,6 @@ out:
static void __exit cleanup_procfs_example(void)
{
remove_proc_entry("jiffies_too", example_dir);
- remove_proc_entry("tty", example_dir);
remove_proc_entry("bar", example_dir);
remove_proc_entry("foo", example_dir);
remove_proc_entry("jiffies", example_dir);
@@ -222,3 +219,4 @@ module_exit(cleanup_procfs_example);
MODULE_AUTHOR("Erik Mouw");
MODULE_DESCRIPTION("procfs examples");
+MODULE_LICENSE("GPL");
diff --git a/Documentation/Makefile b/Documentation/Makefile
new file mode 100644
index 000000000000..94b945733534
--- /dev/null
+++ b/Documentation/Makefile
@@ -0,0 +1,3 @@
+obj-m := DocBook/ accounting/ auxdisplay/ connector/ \
+ filesystems/configfs/ ia64/ networking/ \
+ pcmcia/ spi/ video4linux/ vm/ watchdog/src/
diff --git a/Documentation/accounting/Makefile b/Documentation/accounting/Makefile
new file mode 100644
index 000000000000..31929eb875b1
--- /dev/null
+++ b/Documentation/accounting/Makefile
@@ -0,0 +1,10 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := getdelays
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
+HOSTCFLAGS_getdelays.o += -I$(objtree)/usr/include
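
Note: the hostprogs-y rule above builds getdelays with the host compiler rather than as kernel code, and the extra -I$(objtree)/usr/include points it at the headers exported by "make headers_install". As a hypothetical illustration of the kind of program this produces (not part of the patch), any standalone C file that only needs the exported UAPI headers will do:

#include <stdio.h>
#include <linux/taskstats.h>	/* found via $(objtree)/usr/include */

int main(void)
{
	printf("built against taskstats interface version %d\n",
	       TASKSTATS_VERSION);
	return 0;
}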
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index 3f7755f3963f..cc49400b4af8 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -201,13 +201,19 @@ void print_delayacct(struct taskstats *t)
"RECLAIM %12s%15s\n"
" %15llu%15llu\n",
"count", "real total", "virtual total", "delay total",
- t->cpu_count, t->cpu_run_real_total, t->cpu_run_virtual_total,
- t->cpu_delay_total,
+ (unsigned long long)t->cpu_count,
+ (unsigned long long)t->cpu_run_real_total,
+ (unsigned long long)t->cpu_run_virtual_total,
+ (unsigned long long)t->cpu_delay_total,
"count", "delay total",
- t->blkio_count, t->blkio_delay_total,
- "count", "delay total", t->swapin_count, t->swapin_delay_total,
+ (unsigned long long)t->blkio_count,
+ (unsigned long long)t->blkio_delay_total,
"count", "delay total",
- t->freepages_count, t->freepages_delay_total);
+ (unsigned long long)t->swapin_count,
+ (unsigned long long)t->swapin_delay_total,
+ "count", "delay total",
+ (unsigned long long)t->freepages_count,
+ (unsigned long long)t->freepages_delay_total);
}
void task_context_switch_counts(struct taskstats *t)
@@ -215,14 +221,17 @@ void task_context_switch_counts(struct taskstats *t)
printf("\n\nTask %15s%15s\n"
" %15llu%15llu\n",
"voluntary", "nonvoluntary",
- t->nvcsw, t->nivcsw);
+ (unsigned long long)t->nvcsw, (unsigned long long)t->nivcsw);
}
void print_cgroupstats(struct cgroupstats *c)
{
printf("sleeping %llu, blocked %llu, running %llu, stopped %llu, "
- "uninterruptible %llu\n", c->nr_sleeping, c->nr_io_wait,
- c->nr_running, c->nr_stopped, c->nr_uninterruptible);
+ "uninterruptible %llu\n", (unsigned long long)c->nr_sleeping,
+ (unsigned long long)c->nr_io_wait,
+ (unsigned long long)c->nr_running,
+ (unsigned long long)c->nr_stopped,
+ (unsigned long long)c->nr_uninterruptible);
}
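
Note: the casts added here fix a portability problem rather than a functional one. The taskstats counters are __u64, which maps to "unsigned long" on some 64-bit ABIs and "unsigned long long" elsewhere, so passing them to a %llu conversion without a cast can trigger format warnings or mismatched-argument behaviour. A minimal standalone illustration of the same idiom (hypothetical, not taken from the patch):

#include <stdio.h>
#include <linux/types.h>	/* __u64 */

static void print_counter(const char *name, __u64 value)
{
	/* The cast keeps %llu correct whether __u64 is
	 * "unsigned long" or "unsigned long long" on this ABI. */
	printf("%s %llu\n", name, (unsigned long long)value);
}

int main(void)
{
	print_counter("cpu_delay_total", 123456789ULL);
	return 0;
}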
diff --git a/Documentation/arm/Samsung-S3C24XX/GPIO.txt b/Documentation/arm/Samsung-S3C24XX/GPIO.txt
index b5d20c0b2ab4..ea7ccfc4b274 100644
--- a/Documentation/arm/Samsung-S3C24XX/GPIO.txt
+++ b/Documentation/arm/Samsung-S3C24XX/GPIO.txt
@@ -13,6 +13,21 @@ Introduction
data-sheet/users manual to find out the complete list.
+GPIOLIB
+-------
+
+ With the event of the GPIOLIB in drivers/gpio, support for some
+ of the GPIO functions such as reading and writing a pin will
+ be removed in favour of this common access method.
+
+ Once all the extant drivers have been converted, the functions
+ listed below will be removed (they may be marked as __deprecated
+ in the near future).
+
+ - s3c2410_gpio_getpin
+ - s3c2410_gpio_setpin
+
+
Headers
-------
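
Note: for drivers being converted away from the SoC-specific helpers named above, the generic gpiolib calls cover the same read/write operations. A rough sketch of the replacement (the pin number and label here are illustrative only):

#include <linux/gpio.h>

static int example_pin_setup(unsigned int pin)
{
	int err;

	err = gpio_request(pin, "example-pin");
	if (err)
		return err;

	/* previously s3c2410_gpio_setpin(pin, 1) after configuring
	 * the pin as an output */
	err = gpio_direction_output(pin, 1);
	if (err) {
		gpio_free(pin);
		return err;
	}

	/* previously s3c2410_gpio_getpin(pin) */
	return gpio_get_value(pin);
}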
diff --git a/Documentation/arm/Samsung-S3C24XX/Overview.txt b/Documentation/arm/Samsung-S3C24XX/Overview.txt
index 014a8ec4877d..cff6227b4484 100644
--- a/Documentation/arm/Samsung-S3C24XX/Overview.txt
+++ b/Documentation/arm/Samsung-S3C24XX/Overview.txt
@@ -8,9 +8,10 @@ Introduction
The Samsung S3C24XX range of ARM9 System-on-Chip CPUs are supported
by the 's3c2410' architecture of ARM Linux. Currently the S3C2410,
- S3C2412, S3C2413, S3C2440 and S3C2442 devices are supported.
+ S3C2412, S3C2413, S3C2440, S3C2442 and S3C2443 devices are supported.
+
+ Support for the S3C2400 and S3C24A0 series are in progress.
- Support for the S3C2400 series is in progress.
Configuration
-------------
@@ -38,6 +39,22 @@ Layout
Register, kernel and platform data definitions are held in the
arch/arm/mach-s3c2410 directory./include/mach
+arch/arm/plat-s3c24xx:
+
+ Files in here are either common to all the s3c24xx family,
+ or are common to only some of them with names to indicate this
+ status. The files that are not common to all are generally named
+ with the initial cpu they support in the series to ensure a short
+ name without any possibility of confusion with newer devices.
+
+ As an example, initially s3c244x would cover s3c2440 and s3c2442, but
+ with the s3c2443 which does not share many of the same drivers in
+ this directory, the name becomes invalid. We stick to s3c2440-<x>
+ to indicate a driver that is s3c2440 and s3c2442 compatible.
+
+ This does mean that to find the status of any given SoC, a number
+ of directories may need to be searched.
+
Machines
--------
@@ -159,6 +176,17 @@ NAND
For more information see Documentation/arm/Samsung-S3C24XX/NAND.txt
+SD/MMC
+------
+
+ The SD/MMC hardware pre S3C2443 is supported in the current
+ kernel, the driver is drivers/mmc/host/s3cmci.c and supports
+ 1 and 4 bit SD or MMC cards.
+
+ The SDIO behaviour of this driver has not been fully tested. There is no
+ current support for hardware SDIO interrupts.
+
+
Serial
------
@@ -178,6 +206,9 @@ GPIO
The core contains support for manipulating the GPIO, see the
documentation in GPIO.txt in the same directory as this file.
+ Newer kernels carry GPIOLIB, and support is being moved towards
+ this with some of the older support in line to be removed.
+
Clock Management
----------------
diff --git a/Documentation/auxdisplay/Makefile b/Documentation/auxdisplay/Makefile
new file mode 100644
index 000000000000..51fe23332c81
--- /dev/null
+++ b/Documentation/auxdisplay/Makefile
@@ -0,0 +1,10 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := cfag12864b-example
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
+HOSTCFLAGS_cfag12864b-example.o += -I$(objtree)/usr/include
diff --git a/Documentation/connector/Makefile b/Documentation/connector/Makefile
new file mode 100644
index 000000000000..8df1a7285a06
--- /dev/null
+++ b/Documentation/connector/Makefile
@@ -0,0 +1,11 @@
+ifneq ($(CONFIG_CONNECTOR),)
+obj-m += cn_test.o
+endif
+
+# List of programs to build
+hostprogs-y := ucon
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
+HOSTCFLAGS_ucon.o += -I$(objtree)/usr/include
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index ba0aacde94fb..94bbc27ddd4f 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -59,15 +59,10 @@ apicid values in those tables for disabled apics. In the event BIOS doesn't
mark such hot-pluggable cpus as disabled entries, one could use this
parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
-s390 uses the number of cpus it detects at IPL time to also the number of bits
-in cpu_possible_map. If it is desired to add additional cpus at a later time
-the number should be specified using this option or the possible_cpus option.
-
possible_cpus=n [s390 only] use this to set hotpluggable cpus.
This option sets possible_cpus bits in
cpu_possible_map. Thus keeping the numbers of bits set
constant even if the machine gets rebooted.
- This option overrides additional_cpus.
CPU maps and such
-----------------
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index e6244cde26e9..05c80645e4ee 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -2560,9 +2560,6 @@ Your cooperation is appreciated.
96 = /dev/usb/hiddev0 1st USB HID device
...
111 = /dev/usb/hiddev15 16th USB HID device
- 112 = /dev/usb/auer0 1st auerswald ISDN device
- ...
- 127 = /dev/usb/auer15 16th auerswald ISDN device
128 = /dev/usb/brlvgr0 First Braille Voyager device
...
131 = /dev/usb/brlvgr3 Fourth Braille Voyager device
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 17cab3c74e8b..eb1a47b97427 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -19,15 +19,6 @@ Who: Pavel Machek <pavel@suse.cz>
---------------------------
-What: old NCR53C9x driver
-When: October 2007
-Why: Replaced by the much better esp_scsi driver. Actual low-level
- driver can be ported over almost trivially.
-Who: David Miller <davem@davemloft.net>
- Christoph Hellwig <hch@lst.de>
-
----------------------------
-
What: Video4Linux API 1 ioctls and video_decoder.h from Video devices.
When: December 2008
Files: include/linux/video_decoder.h include/linux/videodev.h
diff --git a/Documentation/filesystems/configfs/Makefile b/Documentation/filesystems/configfs/Makefile
new file mode 100644
index 000000000000..be7ec5e67dbc
--- /dev/null
+++ b/Documentation/filesystems/configfs/Makefile
@@ -0,0 +1,3 @@
+ifneq ($(CONFIG_CONFIGFS_FS),)
+obj-m += configfs_example_explicit.o configfs_example_macros.o
+endif
diff --git a/Documentation/filesystems/quota.txt b/Documentation/filesystems/quota.txt
index a590c4093eff..5e8de25bf0f1 100644
--- a/Documentation/filesystems/quota.txt
+++ b/Documentation/filesystems/quota.txt
@@ -3,14 +3,14 @@ Quota subsystem
===============
Quota subsystem allows system administrator to set limits on used space and
-number of used inodes (inode is a filesystem structure which is associated
-with each file or directory) for users and/or groups. For both used space and
-number of used inodes there are actually two limits. The first one is called
-softlimit and the second one hardlimit. An user can never exceed a hardlimit
-for any resource. User is allowed to exceed softlimit but only for limited
-period of time. This period is called "grace period" or "grace time". When
-grace time is over, user is not able to allocate more space/inodes until he
-frees enough of them to get below softlimit.
+number of used inodes (an inode is a filesystem structure which is associated
+with each file or directory) for users and/or groups. For both used space and
+number of used inodes there are actually two limits. The first one is called
+the softlimit and the second one the hardlimit. A user can never exceed a
+hardlimit for any resource (unless he has the CAP_SYS_RESOURCE capability). A
+user is allowed to exceed a softlimit, but only for a limited period of time.
+This period is called the "grace period" or "grace time". When the grace time
+is over, the user is not able to allocate more space/inodes until he frees
+enough of them to get below the softlimit.
Quota limits (and amount of grace time) are set independently for each
filesystem.
@@ -53,6 +53,12 @@ in parentheses):
QUOTA_NL_BSOFTLONGWARN - space (block) softlimit is exceeded
longer than given grace period.
QUOTA_NL_BSOFTWARN - space (block) softlimit
+ - four warnings are also defined for the event when a user stops
+   exceeding some limit:
+ QUOTA_NL_IHARDBELOW - inode hardlimit
+ QUOTA_NL_ISOFTBELOW - inode softlimit
+ QUOTA_NL_BHARDBELOW - space (block) hardlimit
+ QUOTA_NL_BSOFTBELOW - space (block) softlimit
QUOTA_NL_A_DEV_MAJOR (u32)
- major number of a device with the affected filesystem
QUOTA_NL_A_DEV_MINOR (u32)
diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt
index 540e9e7f59c5..6a0d70a22f05 100644
--- a/Documentation/filesystems/ubifs.txt
+++ b/Documentation/filesystems/ubifs.txt
@@ -57,7 +57,7 @@ Similarly to JFFS2, UBIFS supports on-the-flight compression which makes
it possible to fit quite a lot of data to the flash.
Similarly to JFFS2, UBIFS is tolerant of unclean reboots and power-cuts.
-It does not need stuff like ckfs.ext2. UBIFS automatically replays its
+It does not need stuff like fsck.ext2. UBIFS automatically replays its
journal and recovers from crashes, ensuring that the on-flash data
structures are consistent.
diff --git a/Documentation/hwmon/ibmaem b/Documentation/hwmon/ibmaem
index 2fefaf582a43..e98bdfea3467 100644
--- a/Documentation/hwmon/ibmaem
+++ b/Documentation/hwmon/ibmaem
@@ -1,8 +1,11 @@
Kernel driver ibmaem
======================
+This driver talks to the IBM Systems Director Active Energy Manager, known
+henceforth as AEM.
+
Supported systems:
- * Any recent IBM System X server with Active Energy Manager support.
+ * Any recent IBM System X server with AEM support.
This includes the x3350, x3550, x3650, x3655, x3755, x3850 M2,
x3950 M2, and certain HS2x/LS2x/QS2x blades. The IPMI host interface
driver ("ipmi-si") needs to be loaded for this driver to do anything.
@@ -14,24 +17,22 @@ Author: Darrick J. Wong
Description
-----------
-This driver implements sensor reading support for the energy and power
-meters available on various IBM System X hardware through the BMC. All
-sensor banks will be exported as platform devices; this driver can talk
-to both v1 and v2 interfaces. This driver is completely separate from the
-older ibmpex driver.
+This driver implements sensor reading support for the energy and power meters
+available on various IBM System X hardware through the BMC. All sensor banks
+will be exported as platform devices; this driver can talk to both v1 and v2
+interfaces. This driver is completely separate from the older ibmpex driver.
-The v1 AEM interface has a simple set of features to monitor energy use.
-There is a register that displays an estimate of raw energy consumption
-since the last BMC reset, and a power sensor that returns average power
-use over a configurable interval.
+The v1 AEM interface has a simple set of features to monitor energy use. There
+is a register that displays an estimate of raw energy consumption since the
+last BMC reset, and a power sensor that returns average power use over a
+configurable interval.
-The v2 AEM interface is a bit more sophisticated, being able to present
-a wider range of energy and power use registers, the power cap as
-set by the AEM software, and temperature sensors.
+The v2 AEM interface is a bit more sophisticated, being able to present a wider
+range of energy and power use registers, the power cap as set by the AEM
+software, and temperature sensors.
Special Features
----------------
-The "power_cap" value displays the current system power cap, as set by
-the Active Energy Manager software. Setting the power cap from the host
-is not currently supported.
+The "power_cap" value displays the current system power cap, as set by the AEM
+software. Setting the power cap from the host is not currently supported.
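+
+A minimal user-space sketch of reading the cap follows. The sysfs path and
+attribute name below are assumptions for illustration only; the real location
+depends on how the hwmon device registers on a particular system.
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* illustrative path; check /sys/class/hwmon on your system */
+		FILE *f = fopen("/sys/class/hwmon/hwmon0/device/power1_cap", "r");
+		unsigned long cap;
+
+		if (!f)
+			return 1;
+		if (fscanf(f, "%lu", &cap) == 1)
+			printf("power cap: %lu\n", cap);
+		fclose(f);
+		return 0;
+	}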
diff --git a/Documentation/ia64/Makefile b/Documentation/ia64/Makefile
new file mode 100644
index 000000000000..b75db69ec483
--- /dev/null
+++ b/Documentation/ia64/Makefile
@@ -0,0 +1,8 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := aliasing-test
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 3bb5f466a90d..1c6b545635a2 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -105,7 +105,6 @@ Code Seq# Include File Comments
'T' all linux/soundcard.h conflict!
'T' all asm-i386/ioctls.h conflict!
'U' 00-EF linux/drivers/usb/usb.h
-'U' F0-FF drivers/usb/auerswald.c
'V' all linux/vt.h
'W' 00-1F linux/watchdog.h conflict!
'W' 00-1F linux/wanrouter.h conflict!
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index b88b0ea54e90..655414821edc 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -1447,21 +1447,6 @@ static void configure_device(int fd, const char *tapif, u32 ipaddr)
err(1, "Bringing interface %s up", tapif);
}
-static void get_mac(int fd, const char *tapif, unsigned char hwaddr[6])
-{
- struct ifreq ifr;
-
- memset(&ifr, 0, sizeof(ifr));
- strcpy(ifr.ifr_name, tapif);
-
- /* SIOC stands for Socket I/O Control. G means Get (vs S for Set
- * above). IF means Interface, and HWADDR is hardware address.
- * Simple! */
- if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
- err(1, "getting hw address for %s", tapif);
- memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6);
-}
-
static int get_tun_device(char tapif[IFNAMSIZ])
{
struct ifreq ifr;
@@ -1531,11 +1516,8 @@ static void setup_tun_net(char *arg)
p = strchr(arg, ':');
if (p) {
str2mac(p+1, conf.mac);
+ add_feature(dev, VIRTIO_NET_F_MAC);
*p = '\0';
- } else {
- p = arg + strlen(arg);
- /* None supplied; query the randomly assigned mac. */
- get_mac(ipfd, tapif, conf.mac);
}
/* arg is now either an IP address or a bridge name */
@@ -1547,13 +1529,10 @@ static void setup_tun_net(char *arg)
/* Set up the tun device. */
configure_device(ipfd, tapif, ip);
- /* Tell Guest what MAC address to use. */
- add_feature(dev, VIRTIO_NET_F_MAC);
add_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY);
/* Expect Guest to handle everything except UFO */
add_feature(dev, VIRTIO_NET_F_CSUM);
add_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
- add_feature(dev, VIRTIO_NET_F_MAC);
add_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
add_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
add_feature(dev, VIRTIO_NET_F_GUEST_ECN);
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
new file mode 100644
index 000000000000..6d8af1ac56c4
--- /dev/null
+++ b/Documentation/networking/Makefile
@@ -0,0 +1,8 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := ifenslave
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
index a12059886755..1b96ccda3836 100644
--- a/Documentation/networking/ifenslave.c
+++ b/Documentation/networking/ifenslave.c
@@ -1081,7 +1081,7 @@ static int set_if_addr(char *master_ifname, char *slave_ifname)
}
- ipaddr = ifr.ifr_addr.sa_data;
+ ipaddr = (unsigned char *)ifr.ifr_addr.sa_data;
v_print("Interface '%s': set IP %s to %d.%d.%d.%d\n",
slave_ifname, ifra[i].desc,
ipaddr[0], ipaddr[1], ipaddr[2], ipaddr[3]);
diff --git a/Documentation/pcmcia/Makefile b/Documentation/pcmcia/Makefile
new file mode 100644
index 000000000000..accde871ae77
--- /dev/null
+++ b/Documentation/pcmcia/Makefile
@@ -0,0 +1,10 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := crc32hash
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
+HOSTCFLAGS_crc32hash.o += -I$(objtree)/usr/include
diff --git a/Documentation/pcmcia/crc32hash.c b/Documentation/pcmcia/crc32hash.c
index cbc36d299af8..4210e5abab8a 100644
--- a/Documentation/pcmcia/crc32hash.c
+++ b/Documentation/pcmcia/crc32hash.c
@@ -26,7 +26,7 @@ int main(int argc, char **argv) {
printf("no string passed as argument\n");
return -1;
}
- result = crc32(argv[1], strlen(argv[1]));
+ result = crc32((unsigned char const *)argv[1], strlen(argv[1]));
printf("0x%x\n", result);
return 0;
}
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 6f6d117ac7e2..b117e42a6166 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -1144,8 +1144,6 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
This module supports autoprobe and multiple cards.
- Power management is _not_ supported.
-
Module snd-ice1712
------------------
@@ -1628,8 +1626,6 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
This module supports autoprobe and multiple cards.
- Power management is _not_ supported.
-
Module snd-pcsp
-----------------
@@ -2081,13 +2077,11 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
Module snd-virtuoso
-------------------
- Module for sound cards based on the Asus AV200 chip, i.e.,
- Xonar D2 and Xonar D2X.
+ Module for sound cards based on the Asus AV100/AV200 chips,
+ i.e., Xonar D1, DX, D2 and D2X.
This module supports autoprobe and multiple cards.
- Power management is _not_ supported.
-
Module snd-vx222
----------------
diff --git a/Documentation/spi/Makefile b/Documentation/spi/Makefile
new file mode 100644
index 000000000000..a5b03c88beae
--- /dev/null
+++ b/Documentation/spi/Makefile
@@ -0,0 +1,11 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := spidev_test spidev_fdx
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
+HOSTCFLAGS_spidev_test.o += -I$(objtree)/usr/include
+HOSTCFLAGS_spidev_fdx.o += -I$(objtree)/usr/include
diff --git a/Documentation/usb/auerswald.txt b/Documentation/usb/auerswald.txt
deleted file mode 100644
index 7ee4d8f69116..000000000000
--- a/Documentation/usb/auerswald.txt
+++ /dev/null
@@ -1,30 +0,0 @@
- Auerswald USB kernel driver
- ===========================
-
-What is it? What can I do with it?
-==================================
-The auerswald USB kernel driver connects your linux 2.4.x
-system to the auerswald usb-enabled devices.
-
-There are two types of auerswald usb devices:
-a) small PBX systems (ISDN)
-b) COMfort system telephones (ISDN)
-
-The driver installation creates the devices
-/dev/usb/auer0..15. These devices carry a vendor-
-specific protocol. You may run all auerswald java
-software on it. The java software needs a native
-library "libAuerUsbJNINative.so" installed on
-your system. This library is available from
-auerswald and shipped as part of the java software.
-
-You may create the devices with:
- mknod -m 666 /dev/usb/auer0 c 180 112
- ...
- mknod -m 666 /dev/usb/auer15 c 180 127
-
-Future plans
-============
-- Connection to ISDN4LINUX (the hisax interface)
-
-The maintainer of this driver is wolfgang@iksw-muees.de
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index b2fc4d4a9917..9d31140e3f5b 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -436,7 +436,12 @@ post_reset; the USB core guarantees that this is true of internal
suspend/resume events as well.
If a driver wants to block all suspend/resume calls during some
-critical section, it can simply acquire udev->pm_mutex.
+critical section, it can simply acquire udev->pm_mutex. Note that
+resume calls may be triggered indirectly: block I/O caused by memory
+allocation can make the VM subsystem resume a device. Thus, while
+holding this lock you must not allocate memory with GFP_KERNEL or
+GFP_NOFS.
+
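+A minimal sketch of such a critical section (the buffer and length names
+are invented for the example):
+
+	mutex_lock(&udev->pm_mutex);
+	/* GFP_NOIO avoids block I/O, which could otherwise trigger an
+	 * indirect resume of this device while pm_mutex is held */
+	buf = kmalloc(len, GFP_NOIO);
+	if (buf) {
+		/* ... talk to the device ... */
+		kfree(buf);
+	}
+	mutex_unlock(&udev->pm_mutex);
+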
Alternatively, if the critical section might call some of the
usb_autopm_* routines, the driver can avoid deadlock by doing:
diff --git a/Documentation/video4linux/Makefile b/Documentation/video4linux/Makefile
new file mode 100644
index 000000000000..1ed0e98d057d
--- /dev/null
+++ b/Documentation/video4linux/Makefile
@@ -0,0 +1,8 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := v4lgrab
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
diff --git a/Documentation/vm/Makefile b/Documentation/vm/Makefile
new file mode 100644
index 000000000000..6f562f778b28
--- /dev/null
+++ b/Documentation/vm/Makefile
@@ -0,0 +1,8 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := slabinfo
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration
index 99f89aa10169..d5fdfd34bbaf 100644
--- a/Documentation/vm/page_migration
+++ b/Documentation/vm/page_migration
@@ -18,10 +18,11 @@ migrate_pages function call takes two sets of nodes and moves pages of a
process that are located on the from nodes to the destination nodes.
Page migration functions are provided by the numactl package by Andi Kleen
(a version later than 0.9.3 is required. Get it from
-ftp://ftp.suse.com/pub/people/ak). numactl provided libnuma which
-provides an interface similar to other numa functionality for page migration.
-cat /proc/<pid>/numa_maps allows an easy review of where the pages of
-a process are located. See also the numa_maps manpage in the numactl package.
+ftp://oss.sgi.com/www/projects/libnuma/download/). numactl ships libnuma,
+which provides an interface for page migration similar to the other NUMA
+functionality. cat /proc/<pid>/numa_maps allows an easy review of where the
+pages of a process are located. See also the numa_maps documentation in the
+proc(5) man page.
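+
+A rough user-space sketch of the underlying migrate_pages() call, moving the
+calling process's pages from node 0 to node 1 (link with -lnuma; error
+handling kept minimal):
+
+	#include <numaif.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long from = 1UL << 0;	/* source: node 0 */
+		unsigned long to   = 1UL << 1;	/* destination: node 1 */
+		long left;
+
+		/* pid 0 means the calling process */
+		left = migrate_pages(0, 8 * sizeof(from), &from, &to);
+		if (left < 0)
+			perror("migrate_pages");
+		else
+			printf("%ld pages could not be moved\n", left);
+		return 0;
+	}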
Manual migration is useful if for example the scheduler has relocated
a process to a processor on a distant node. A batch scheduler or an
diff --git a/Documentation/watchdog/src/Makefile b/Documentation/watchdog/src/Makefile
new file mode 100644
index 000000000000..40e5f46e4740
--- /dev/null
+++ b/Documentation/watchdog/src/Makefile
@@ -0,0 +1,8 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := watchdog-simple watchdog-test
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
diff --git a/MAINTAINERS b/MAINTAINERS
index b343814a1ace..663485b004fb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -175,12 +175,18 @@ M: bcrl@kvack.org
L: linux-aio@kvack.org
S: Supported
-ABIT UGURU HARDWARE MONITOR DRIVER
+ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
P: Hans de Goede
M: j.w.r.degoede@hhs.nl
L: lm-sensors@lm-sensors.org
S: Maintained
+ABIT UGURU 3 HARDWARE MONITOR DRIVER
+P: Alistair John Strachan
+M: alistair@devzero.co.uk
+L: lm-sensors@lm-sensors.org
+S: Maintained
+
ACENIC DRIVER
P: Jes Sorensen
M: jes@trained-monkey.org
@@ -2462,7 +2468,7 @@ L: kernel-janitors@vger.kernel.org
W: http://www.kerneljanitors.org/
S: Maintained
-KERNEL NFSD
+KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
P: J. Bruce Fields
M: bfields@fieldses.org
P: Neil Brown
@@ -2928,6 +2934,12 @@ M: jirislaby@gmail.com
L: linux-kernel@vger.kernel.org
S: Maintained
+MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
+P: Felipe Balbi
+M: felipe.balbi@nokia.com
+L: linux-usb@vger.kernel.org
+S: Maintained
+
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
P: Andrew Gallatin
M: gallatin@myri.com
@@ -3076,9 +3088,10 @@ M: horms@verge.net.au
P: Julian Anastasov
M: ja@ssi.bg
L: netdev@vger.kernel.org
+L: lvs-devel@vger.kernel.org
S: Maintained
-NFS CLIENT
+NFS, SUNRPC, AND LOCKD CLIENTS
P: Trond Myklebust
M: Trond.Myklebust@netapp.com
L: linux-nfs@vger.kernel.org
@@ -3741,6 +3754,16 @@ L: linux-visws-devel@lists.sf.net
W: http://linux-visws.sf.net
S: Maintained for 2.6.
+SGI GRU DRIVER
+P: Jack Steiner
+M: steiner@sgi.com
+S: Maintained
+
+SGI XP/XPC/XPNET DRIVER
+P: Dean Nelson
+M: dcn@sgi.com
+S: Maintained
+
SIMTEC EB110ATX (Chalice CATS)
P: Ben Dooks
P: Vincent Sanders
@@ -4195,12 +4218,6 @@ M: oliver@neukum.name
L: linux-usb@vger.kernel.org
S: Maintained
-USB AUERSWALD DRIVER
-P: Wolfgang Muees
-M: wolfgang@iksw-muees.de
-L: linux-usb@vger.kernel.org
-S: Maintained
-
USB BLOCK DRIVER (UB ub)
P: Pete Zaitcev
M: zaitcev@redhat.com
diff --git a/Makefile b/Makefile
index f3e206509ee1..53bf6ec1af9d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 27
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Rotary Wombat
# *DOCUMENTATION*
@@ -822,6 +822,9 @@ endif
ifdef CONFIG_SAMPLES
$(Q)$(MAKE) $(build)=samples
endif
+ifdef CONFIG_BUILD_DOCSRC
+ $(Q)$(MAKE) $(build)=Documentation
+endif
$(call vmlinux-modpost)
$(call if_changed_rule,vmlinux__)
$(Q)rm -f .old_version
@@ -1166,7 +1169,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
#
clean: rm-dirs := $(CLEAN_DIRS)
clean: rm-files := $(CLEAN_FILES)
-clean-dirs := $(addprefix _clean_,$(srctree) $(vmlinux-alldirs))
+clean-dirs := $(addprefix _clean_,$(srctree) $(vmlinux-alldirs) Documentation)
PHONY += $(clean-dirs) clean archclean
$(clean-dirs):
diff --git a/include/asm-alpha/8253pit.h b/arch/alpha/include/asm/8253pit.h
index fef5c1450e47..fef5c1450e47 100644
--- a/include/asm-alpha/8253pit.h
+++ b/arch/alpha/include/asm/8253pit.h
diff --git a/include/asm-alpha/Kbuild b/arch/alpha/include/asm/Kbuild
index b7c8f188b313..b7c8f188b313 100644
--- a/include/asm-alpha/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
diff --git a/include/asm-alpha/a.out-core.h b/arch/alpha/include/asm/a.out-core.h
index 9e33e92e524c..9e33e92e524c 100644
--- a/include/asm-alpha/a.out-core.h
+++ b/arch/alpha/include/asm/a.out-core.h
diff --git a/include/asm-alpha/a.out.h b/arch/alpha/include/asm/a.out.h
index 02ce8473870a..02ce8473870a 100644
--- a/include/asm-alpha/a.out.h
+++ b/arch/alpha/include/asm/a.out.h
diff --git a/include/asm-alpha/agp.h b/arch/alpha/include/asm/agp.h
index 26c179135293..26c179135293 100644
--- a/include/asm-alpha/agp.h
+++ b/arch/alpha/include/asm/agp.h
diff --git a/include/asm-alpha/agp_backend.h b/arch/alpha/include/asm/agp_backend.h
index 55dd44a2cea7..55dd44a2cea7 100644
--- a/include/asm-alpha/agp_backend.h
+++ b/arch/alpha/include/asm/agp_backend.h
diff --git a/include/asm-alpha/atomic.h b/arch/alpha/include/asm/atomic.h
index ca88e54dec93..ca88e54dec93 100644
--- a/include/asm-alpha/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
diff --git a/include/asm-alpha/auxvec.h b/arch/alpha/include/asm/auxvec.h
index e96fe880e310..e96fe880e310 100644
--- a/include/asm-alpha/auxvec.h
+++ b/arch/alpha/include/asm/auxvec.h
diff --git a/include/asm-alpha/barrier.h b/arch/alpha/include/asm/barrier.h
index ac78eba909bc..ac78eba909bc 100644
--- a/include/asm-alpha/barrier.h
+++ b/arch/alpha/include/asm/barrier.h
diff --git a/include/asm-alpha/bitops.h b/arch/alpha/include/asm/bitops.h
index 15f3ae25c511..15f3ae25c511 100644
--- a/include/asm-alpha/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
diff --git a/include/asm-alpha/bug.h b/arch/alpha/include/asm/bug.h
index 695a5ee4b5d3..695a5ee4b5d3 100644
--- a/include/asm-alpha/bug.h
+++ b/arch/alpha/include/asm/bug.h
diff --git a/include/asm-alpha/bugs.h b/arch/alpha/include/asm/bugs.h
index 78030d1c7e7e..78030d1c7e7e 100644
--- a/include/asm-alpha/bugs.h
+++ b/arch/alpha/include/asm/bugs.h
diff --git a/include/asm-alpha/byteorder.h b/arch/alpha/include/asm/byteorder.h
index 58e958fc7f1b..58e958fc7f1b 100644
--- a/include/asm-alpha/byteorder.h
+++ b/arch/alpha/include/asm/byteorder.h
diff --git a/include/asm-alpha/cache.h b/arch/alpha/include/asm/cache.h
index f199e69a5d0b..f199e69a5d0b 100644
--- a/include/asm-alpha/cache.h
+++ b/arch/alpha/include/asm/cache.h
diff --git a/include/asm-alpha/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index b686cc7fc44e..b686cc7fc44e 100644
--- a/include/asm-alpha/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
diff --git a/include/asm-alpha/checksum.h b/arch/alpha/include/asm/checksum.h
index d3854bbf0a9e..d3854bbf0a9e 100644
--- a/include/asm-alpha/checksum.h
+++ b/arch/alpha/include/asm/checksum.h
diff --git a/include/asm-alpha/compiler.h b/arch/alpha/include/asm/compiler.h
index da6bb199839c..da6bb199839c 100644
--- a/include/asm-alpha/compiler.h
+++ b/arch/alpha/include/asm/compiler.h
diff --git a/include/asm-alpha/console.h b/arch/alpha/include/asm/console.h
index a3ce4e62249b..a3ce4e62249b 100644
--- a/include/asm-alpha/console.h
+++ b/arch/alpha/include/asm/console.h
diff --git a/include/asm-alpha/core_apecs.h b/arch/alpha/include/asm/core_apecs.h
index 6785ff7e02bc..6785ff7e02bc 100644
--- a/include/asm-alpha/core_apecs.h
+++ b/arch/alpha/include/asm/core_apecs.h
diff --git a/include/asm-alpha/core_cia.h b/arch/alpha/include/asm/core_cia.h
index 9e0516c0ca27..9e0516c0ca27 100644
--- a/include/asm-alpha/core_cia.h
+++ b/arch/alpha/include/asm/core_cia.h
diff --git a/include/asm-alpha/core_irongate.h b/arch/alpha/include/asm/core_irongate.h
index 24b2db541501..24b2db541501 100644
--- a/include/asm-alpha/core_irongate.h
+++ b/arch/alpha/include/asm/core_irongate.h
diff --git a/include/asm-alpha/core_lca.h b/arch/alpha/include/asm/core_lca.h
index f7cb4b460954..f7cb4b460954 100644
--- a/include/asm-alpha/core_lca.h
+++ b/arch/alpha/include/asm/core_lca.h
diff --git a/include/asm-alpha/core_marvel.h b/arch/alpha/include/asm/core_marvel.h
index 30d55fe7aaf6..30d55fe7aaf6 100644
--- a/include/asm-alpha/core_marvel.h
+++ b/arch/alpha/include/asm/core_marvel.h
diff --git a/include/asm-alpha/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index acf55b483472..acf55b483472 100644
--- a/include/asm-alpha/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
diff --git a/include/asm-alpha/core_polaris.h b/arch/alpha/include/asm/core_polaris.h
index 2f966b64659d..2f966b64659d 100644
--- a/include/asm-alpha/core_polaris.h
+++ b/arch/alpha/include/asm/core_polaris.h
diff --git a/include/asm-alpha/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 46bfff58f670..46bfff58f670 100644
--- a/include/asm-alpha/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
diff --git a/include/asm-alpha/core_titan.h b/arch/alpha/include/asm/core_titan.h
index a17f6f33b68e..a17f6f33b68e 100644
--- a/include/asm-alpha/core_titan.h
+++ b/arch/alpha/include/asm/core_titan.h
diff --git a/include/asm-alpha/core_tsunami.h b/arch/alpha/include/asm/core_tsunami.h
index 58d4fe48742c..58d4fe48742c 100644
--- a/include/asm-alpha/core_tsunami.h
+++ b/arch/alpha/include/asm/core_tsunami.h
diff --git a/include/asm-alpha/core_wildfire.h b/arch/alpha/include/asm/core_wildfire.h
index cd562f544ba2..cd562f544ba2 100644
--- a/include/asm-alpha/core_wildfire.h
+++ b/arch/alpha/include/asm/core_wildfire.h
diff --git a/include/asm-alpha/cputime.h b/arch/alpha/include/asm/cputime.h
index 19577fd93230..19577fd93230 100644
--- a/include/asm-alpha/cputime.h
+++ b/arch/alpha/include/asm/cputime.h
diff --git a/include/asm-alpha/current.h b/arch/alpha/include/asm/current.h
index 094d285a1b34..094d285a1b34 100644
--- a/include/asm-alpha/current.h
+++ b/arch/alpha/include/asm/current.h
diff --git a/include/asm-alpha/delay.h b/arch/alpha/include/asm/delay.h
index 2aa3f410f7e6..2aa3f410f7e6 100644
--- a/include/asm-alpha/delay.h
+++ b/arch/alpha/include/asm/delay.h
diff --git a/include/asm-alpha/device.h b/arch/alpha/include/asm/device.h
index d8f9872b0e2d..d8f9872b0e2d 100644
--- a/include/asm-alpha/device.h
+++ b/arch/alpha/include/asm/device.h
diff --git a/include/asm-alpha/div64.h b/arch/alpha/include/asm/div64.h
index 6cd978cefb28..6cd978cefb28 100644
--- a/include/asm-alpha/div64.h
+++ b/arch/alpha/include/asm/div64.h
diff --git a/include/asm-alpha/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index a5801ae02e4b..a5801ae02e4b 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
diff --git a/include/asm-alpha/dma.h b/arch/alpha/include/asm/dma.h
index 87cfdbdf08fc..87cfdbdf08fc 100644
--- a/include/asm-alpha/dma.h
+++ b/arch/alpha/include/asm/dma.h
diff --git a/include/asm-alpha/elf.h b/arch/alpha/include/asm/elf.h
index fc1002ea1e0c..fc1002ea1e0c 100644
--- a/include/asm-alpha/elf.h
+++ b/arch/alpha/include/asm/elf.h
diff --git a/include/asm-alpha/emergency-restart.h b/arch/alpha/include/asm/emergency-restart.h
index 108d8c48e42e..108d8c48e42e 100644
--- a/include/asm-alpha/emergency-restart.h
+++ b/arch/alpha/include/asm/emergency-restart.h
diff --git a/include/asm-alpha/err_common.h b/arch/alpha/include/asm/err_common.h
index c25095942107..c25095942107 100644
--- a/include/asm-alpha/err_common.h
+++ b/arch/alpha/include/asm/err_common.h
diff --git a/include/asm-alpha/err_ev6.h b/arch/alpha/include/asm/err_ev6.h
index ea637791e4a9..ea637791e4a9 100644
--- a/include/asm-alpha/err_ev6.h
+++ b/arch/alpha/include/asm/err_ev6.h
diff --git a/include/asm-alpha/err_ev7.h b/arch/alpha/include/asm/err_ev7.h
index 87f99777c2e4..87f99777c2e4 100644
--- a/include/asm-alpha/err_ev7.h
+++ b/arch/alpha/include/asm/err_ev7.h
diff --git a/include/asm-alpha/errno.h b/arch/alpha/include/asm/errno.h
index 69e2655249d2..69e2655249d2 100644
--- a/include/asm-alpha/errno.h
+++ b/arch/alpha/include/asm/errno.h
diff --git a/include/asm-alpha/fb.h b/arch/alpha/include/asm/fb.h
index fa9bbb96b2b3..fa9bbb96b2b3 100644
--- a/include/asm-alpha/fb.h
+++ b/arch/alpha/include/asm/fb.h
diff --git a/include/asm-alpha/fcntl.h b/arch/alpha/include/asm/fcntl.h
index 25da0017ec87..25da0017ec87 100644
--- a/include/asm-alpha/fcntl.h
+++ b/arch/alpha/include/asm/fcntl.h
diff --git a/include/asm-alpha/floppy.h b/arch/alpha/include/asm/floppy.h
index 0be50413b2b5..0be50413b2b5 100644
--- a/include/asm-alpha/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
diff --git a/include/asm-alpha/fpu.h b/arch/alpha/include/asm/fpu.h
index ecb17a72acc3..ecb17a72acc3 100644
--- a/include/asm-alpha/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
diff --git a/include/asm-alpha/futex.h b/arch/alpha/include/asm/futex.h
index 6a332a9f099c..6a332a9f099c 100644
--- a/include/asm-alpha/futex.h
+++ b/arch/alpha/include/asm/futex.h
diff --git a/include/asm-alpha/gct.h b/arch/alpha/include/asm/gct.h
index 3504c704927c..3504c704927c 100644
--- a/include/asm-alpha/gct.h
+++ b/arch/alpha/include/asm/gct.h
diff --git a/include/asm-alpha/gentrap.h b/arch/alpha/include/asm/gentrap.h
index ae50cc3192c7..ae50cc3192c7 100644
--- a/include/asm-alpha/gentrap.h
+++ b/arch/alpha/include/asm/gentrap.h
diff --git a/include/asm-alpha/hardirq.h b/arch/alpha/include/asm/hardirq.h
index d953e234daa8..d953e234daa8 100644
--- a/include/asm-alpha/hardirq.h
+++ b/arch/alpha/include/asm/hardirq.h
diff --git a/include/asm-alpha/hw_irq.h b/arch/alpha/include/asm/hw_irq.h
index a37db0f95092..a37db0f95092 100644
--- a/include/asm-alpha/hw_irq.h
+++ b/arch/alpha/include/asm/hw_irq.h
diff --git a/include/asm-alpha/hwrpb.h b/arch/alpha/include/asm/hwrpb.h
index 8e8f871af7cf..8e8f871af7cf 100644
--- a/include/asm-alpha/hwrpb.h
+++ b/arch/alpha/include/asm/hwrpb.h
diff --git a/include/asm-alpha/io.h b/arch/alpha/include/asm/io.h
index e971ab000f95..e971ab000f95 100644
--- a/include/asm-alpha/io.h
+++ b/arch/alpha/include/asm/io.h
diff --git a/include/asm-alpha/io_trivial.h b/arch/alpha/include/asm/io_trivial.h
index 1c77f10b4b36..1c77f10b4b36 100644
--- a/include/asm-alpha/io_trivial.h
+++ b/arch/alpha/include/asm/io_trivial.h
diff --git a/include/asm-alpha/ioctl.h b/arch/alpha/include/asm/ioctl.h
index fc63727f4178..fc63727f4178 100644
--- a/include/asm-alpha/ioctl.h
+++ b/arch/alpha/include/asm/ioctl.h
diff --git a/include/asm-alpha/ioctls.h b/arch/alpha/include/asm/ioctls.h
index 67bb9f6fdbe4..67bb9f6fdbe4 100644
--- a/include/asm-alpha/ioctls.h
+++ b/arch/alpha/include/asm/ioctls.h
diff --git a/include/asm-alpha/ipcbuf.h b/arch/alpha/include/asm/ipcbuf.h
index d9c0e1a50702..d9c0e1a50702 100644
--- a/include/asm-alpha/ipcbuf.h
+++ b/arch/alpha/include/asm/ipcbuf.h
diff --git a/include/asm-alpha/irq.h b/arch/alpha/include/asm/irq.h
index 06377400dc09..06377400dc09 100644
--- a/include/asm-alpha/irq.h
+++ b/arch/alpha/include/asm/irq.h
diff --git a/include/asm-alpha/irq_regs.h b/arch/alpha/include/asm/irq_regs.h
index 3dd9c0b70270..3dd9c0b70270 100644
--- a/include/asm-alpha/irq_regs.h
+++ b/arch/alpha/include/asm/irq_regs.h
diff --git a/include/asm-alpha/jensen.h b/arch/alpha/include/asm/jensen.h
index 964b06ead43b..964b06ead43b 100644
--- a/include/asm-alpha/jensen.h
+++ b/arch/alpha/include/asm/jensen.h
diff --git a/include/asm-alpha/kdebug.h b/arch/alpha/include/asm/kdebug.h
index 6ece1b037665..6ece1b037665 100644
--- a/include/asm-alpha/kdebug.h
+++ b/arch/alpha/include/asm/kdebug.h
diff --git a/include/asm-alpha/kmap_types.h b/arch/alpha/include/asm/kmap_types.h
index 3e6735a34c57..3e6735a34c57 100644
--- a/include/asm-alpha/kmap_types.h
+++ b/arch/alpha/include/asm/kmap_types.h
diff --git a/include/asm-alpha/linkage.h b/arch/alpha/include/asm/linkage.h
index 291c2d01c44f..291c2d01c44f 100644
--- a/include/asm-alpha/linkage.h
+++ b/arch/alpha/include/asm/linkage.h
diff --git a/include/asm-alpha/local.h b/arch/alpha/include/asm/local.h
index 6ad3ea696421..6ad3ea696421 100644
--- a/include/asm-alpha/local.h
+++ b/arch/alpha/include/asm/local.h
diff --git a/include/asm-alpha/machvec.h b/arch/alpha/include/asm/machvec.h
index a86c083cdf7f..a86c083cdf7f 100644
--- a/include/asm-alpha/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
diff --git a/include/asm-alpha/mc146818rtc.h b/arch/alpha/include/asm/mc146818rtc.h
index 097703f1c8cb..097703f1c8cb 100644
--- a/include/asm-alpha/mc146818rtc.h
+++ b/arch/alpha/include/asm/mc146818rtc.h
diff --git a/include/asm-alpha/md.h b/arch/alpha/include/asm/md.h
index 6c9b8222a4f2..6c9b8222a4f2 100644
--- a/include/asm-alpha/md.h
+++ b/arch/alpha/include/asm/md.h
diff --git a/include/asm-alpha/mman.h b/arch/alpha/include/asm/mman.h
index 90d7c35d2867..90d7c35d2867 100644
--- a/include/asm-alpha/mman.h
+++ b/arch/alpha/include/asm/mman.h
diff --git a/include/asm-alpha/mmu.h b/arch/alpha/include/asm/mmu.h
index 3dc127779329..3dc127779329 100644
--- a/include/asm-alpha/mmu.h
+++ b/arch/alpha/include/asm/mmu.h
diff --git a/include/asm-alpha/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 86c08a02d239..86c08a02d239 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
diff --git a/include/asm-alpha/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 8af56ce346ad..8af56ce346ad 100644
--- a/include/asm-alpha/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
diff --git a/include/asm-alpha/module.h b/arch/alpha/include/asm/module.h
index 7b63743c534a..7b63743c534a 100644
--- a/include/asm-alpha/module.h
+++ b/arch/alpha/include/asm/module.h
diff --git a/include/asm-alpha/msgbuf.h b/arch/alpha/include/asm/msgbuf.h
index 98496501a2bb..98496501a2bb 100644
--- a/include/asm-alpha/msgbuf.h
+++ b/arch/alpha/include/asm/msgbuf.h
diff --git a/include/asm-alpha/mutex.h b/arch/alpha/include/asm/mutex.h
index 458c1f7fbc18..458c1f7fbc18 100644
--- a/include/asm-alpha/mutex.h
+++ b/arch/alpha/include/asm/mutex.h
diff --git a/include/asm-alpha/page.h b/arch/alpha/include/asm/page.h
index 0995f9d13417..0995f9d13417 100644
--- a/include/asm-alpha/page.h
+++ b/arch/alpha/include/asm/page.h
diff --git a/include/asm-alpha/pal.h b/arch/alpha/include/asm/pal.h
index 9b4ba0d6f00b..9b4ba0d6f00b 100644
--- a/include/asm-alpha/pal.h
+++ b/arch/alpha/include/asm/pal.h
diff --git a/include/asm-alpha/param.h b/arch/alpha/include/asm/param.h
index e691ecfedb2c..e691ecfedb2c 100644
--- a/include/asm-alpha/param.h
+++ b/arch/alpha/include/asm/param.h
diff --git a/include/asm-alpha/parport.h b/arch/alpha/include/asm/parport.h
index c5ee7cbb2fcd..c5ee7cbb2fcd 100644
--- a/include/asm-alpha/parport.h
+++ b/arch/alpha/include/asm/parport.h
diff --git a/include/asm-alpha/pci.h b/arch/alpha/include/asm/pci.h
index 2a14302c17a3..2a14302c17a3 100644
--- a/include/asm-alpha/pci.h
+++ b/arch/alpha/include/asm/pci.h
diff --git a/include/asm-alpha/percpu.h b/arch/alpha/include/asm/percpu.h
index 3495e8e00d70..3495e8e00d70 100644
--- a/include/asm-alpha/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
diff --git a/include/asm-alpha/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index fd090155dccd..fd090155dccd 100644
--- a/include/asm-alpha/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
diff --git a/include/asm-alpha/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 3f0c59f6d8aa..3f0c59f6d8aa 100644
--- a/include/asm-alpha/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
diff --git a/include/asm-alpha/poll.h b/arch/alpha/include/asm/poll.h
index c98509d3149e..c98509d3149e 100644
--- a/include/asm-alpha/poll.h
+++ b/arch/alpha/include/asm/poll.h
diff --git a/include/asm-alpha/posix_types.h b/arch/alpha/include/asm/posix_types.h
index db167413300b..db167413300b 100644
--- a/include/asm-alpha/posix_types.h
+++ b/arch/alpha/include/asm/posix_types.h
diff --git a/include/asm-alpha/processor.h b/arch/alpha/include/asm/processor.h
index 94afe5859301..94afe5859301 100644
--- a/include/asm-alpha/processor.h
+++ b/arch/alpha/include/asm/processor.h
diff --git a/include/asm-alpha/ptrace.h b/arch/alpha/include/asm/ptrace.h
index 32c7a5cddd59..32c7a5cddd59 100644
--- a/include/asm-alpha/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
diff --git a/include/asm-alpha/reg.h b/arch/alpha/include/asm/reg.h
index 86ff916fb069..86ff916fb069 100644
--- a/include/asm-alpha/reg.h
+++ b/arch/alpha/include/asm/reg.h
diff --git a/include/asm-alpha/regdef.h b/arch/alpha/include/asm/regdef.h
index 142df9c4f8b8..142df9c4f8b8 100644
--- a/include/asm-alpha/regdef.h
+++ b/arch/alpha/include/asm/regdef.h
diff --git a/include/asm-alpha/resource.h b/arch/alpha/include/asm/resource.h
index c10874ff5973..c10874ff5973 100644
--- a/include/asm-alpha/resource.h
+++ b/arch/alpha/include/asm/resource.h
diff --git a/include/asm-alpha/rtc.h b/arch/alpha/include/asm/rtc.h
index 4e854b1333eb..4e854b1333eb 100644
--- a/include/asm-alpha/rtc.h
+++ b/arch/alpha/include/asm/rtc.h
diff --git a/include/asm-alpha/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 1570c0b54336..1570c0b54336 100644
--- a/include/asm-alpha/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
diff --git a/include/asm-alpha/scatterlist.h b/arch/alpha/include/asm/scatterlist.h
index 440747ca6349..440747ca6349 100644
--- a/include/asm-alpha/scatterlist.h
+++ b/arch/alpha/include/asm/scatterlist.h
diff --git a/include/asm-alpha/sections.h b/arch/alpha/include/asm/sections.h
index 43b40edd6e44..43b40edd6e44 100644
--- a/include/asm-alpha/sections.h
+++ b/arch/alpha/include/asm/sections.h
diff --git a/include/asm-alpha/segment.h b/arch/alpha/include/asm/segment.h
index 0453d97daae7..0453d97daae7 100644
--- a/include/asm-alpha/segment.h
+++ b/arch/alpha/include/asm/segment.h
diff --git a/include/asm-alpha/sembuf.h b/arch/alpha/include/asm/sembuf.h
index 7b38b1534784..7b38b1534784 100644
--- a/include/asm-alpha/sembuf.h
+++ b/arch/alpha/include/asm/sembuf.h
diff --git a/include/asm-alpha/serial.h b/arch/alpha/include/asm/serial.h
index 9d263e8d8ccc..9d263e8d8ccc 100644
--- a/include/asm-alpha/serial.h
+++ b/arch/alpha/include/asm/serial.h
diff --git a/include/asm-alpha/setup.h b/arch/alpha/include/asm/setup.h
index 2e023a4aa317..2e023a4aa317 100644
--- a/include/asm-alpha/setup.h
+++ b/arch/alpha/include/asm/setup.h
diff --git a/include/asm-alpha/sfp-machine.h b/arch/alpha/include/asm/sfp-machine.h
index 5fe63afbd474..5fe63afbd474 100644
--- a/include/asm-alpha/sfp-machine.h
+++ b/arch/alpha/include/asm/sfp-machine.h
diff --git a/include/asm-alpha/shmbuf.h b/arch/alpha/include/asm/shmbuf.h
index 37ee84f05085..37ee84f05085 100644
--- a/include/asm-alpha/shmbuf.h
+++ b/arch/alpha/include/asm/shmbuf.h
diff --git a/include/asm-alpha/shmparam.h b/arch/alpha/include/asm/shmparam.h
index cc901d58aebb..cc901d58aebb 100644
--- a/include/asm-alpha/shmparam.h
+++ b/arch/alpha/include/asm/shmparam.h
diff --git a/include/asm-alpha/sigcontext.h b/arch/alpha/include/asm/sigcontext.h
index 323cdb026198..323cdb026198 100644
--- a/include/asm-alpha/sigcontext.h
+++ b/arch/alpha/include/asm/sigcontext.h
diff --git a/include/asm-alpha/siginfo.h b/arch/alpha/include/asm/siginfo.h
index 9822362a8424..9822362a8424 100644
--- a/include/asm-alpha/siginfo.h
+++ b/arch/alpha/include/asm/siginfo.h
diff --git a/include/asm-alpha/signal.h b/arch/alpha/include/asm/signal.h
index 13c2305d35ef..13c2305d35ef 100644
--- a/include/asm-alpha/signal.h
+++ b/arch/alpha/include/asm/signal.h
diff --git a/include/asm-alpha/smp.h b/arch/alpha/include/asm/smp.h
index 544c69af8168..544c69af8168 100644
--- a/include/asm-alpha/smp.h
+++ b/arch/alpha/include/asm/smp.h
diff --git a/include/asm-alpha/socket.h b/arch/alpha/include/asm/socket.h
index a1057c2d95e7..a1057c2d95e7 100644
--- a/include/asm-alpha/socket.h
+++ b/arch/alpha/include/asm/socket.h
diff --git a/include/asm-alpha/sockios.h b/arch/alpha/include/asm/sockios.h
index 7932c7ab4a4d..7932c7ab4a4d 100644
--- a/include/asm-alpha/sockios.h
+++ b/arch/alpha/include/asm/sockios.h
diff --git a/include/asm-alpha/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aeeb125f6851..aeeb125f6851 100644
--- a/include/asm-alpha/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
diff --git a/include/asm-alpha/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 8141eb5ebf0d..8141eb5ebf0d 100644
--- a/include/asm-alpha/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
diff --git a/include/asm-alpha/stat.h b/arch/alpha/include/asm/stat.h
index 07ad3e6b3f3e..07ad3e6b3f3e 100644
--- a/include/asm-alpha/stat.h
+++ b/arch/alpha/include/asm/stat.h
diff --git a/include/asm-alpha/statfs.h b/arch/alpha/include/asm/statfs.h
index ad15830baefe..ad15830baefe 100644
--- a/include/asm-alpha/statfs.h
+++ b/arch/alpha/include/asm/statfs.h
diff --git a/include/asm-alpha/string.h b/arch/alpha/include/asm/string.h
index b02b8a282940..b02b8a282940 100644
--- a/include/asm-alpha/string.h
+++ b/arch/alpha/include/asm/string.h
diff --git a/include/asm-alpha/suspend.h b/arch/alpha/include/asm/suspend.h
index c7042d575851..c7042d575851 100644
--- a/include/asm-alpha/suspend.h
+++ b/arch/alpha/include/asm/suspend.h
diff --git a/include/asm-alpha/sysinfo.h b/arch/alpha/include/asm/sysinfo.h
index 086aba284df2..086aba284df2 100644
--- a/include/asm-alpha/sysinfo.h
+++ b/arch/alpha/include/asm/sysinfo.h
diff --git a/include/asm-alpha/system.h b/arch/alpha/include/asm/system.h
index afe20fa58c99..afe20fa58c99 100644
--- a/include/asm-alpha/system.h
+++ b/arch/alpha/include/asm/system.h
diff --git a/include/asm-alpha/termbits.h b/arch/alpha/include/asm/termbits.h
index ad854a4a3af6..ad854a4a3af6 100644
--- a/include/asm-alpha/termbits.h
+++ b/arch/alpha/include/asm/termbits.h
diff --git a/include/asm-alpha/termios.h b/arch/alpha/include/asm/termios.h
index fa13716a11c3..fa13716a11c3 100644
--- a/include/asm-alpha/termios.h
+++ b/arch/alpha/include/asm/termios.h
diff --git a/include/asm-alpha/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 15fda4344424..15fda4344424 100644
--- a/include/asm-alpha/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
diff --git a/include/asm-alpha/timex.h b/arch/alpha/include/asm/timex.h
index afa0c45e3e98..afa0c45e3e98 100644
--- a/include/asm-alpha/timex.h
+++ b/arch/alpha/include/asm/timex.h
diff --git a/include/asm-alpha/tlb.h b/arch/alpha/include/asm/tlb.h
index c13636575fba..c13636575fba 100644
--- a/include/asm-alpha/tlb.h
+++ b/arch/alpha/include/asm/tlb.h
diff --git a/include/asm-alpha/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index 9d87aaa08c0d..9d87aaa08c0d 100644
--- a/include/asm-alpha/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
diff --git a/include/asm-alpha/topology.h b/arch/alpha/include/asm/topology.h
index 149532e162c4..149532e162c4 100644
--- a/include/asm-alpha/topology.h
+++ b/arch/alpha/include/asm/topology.h
diff --git a/include/asm-alpha/types.h b/arch/alpha/include/asm/types.h
index c1541353ccef..c1541353ccef 100644
--- a/include/asm-alpha/types.h
+++ b/arch/alpha/include/asm/types.h
diff --git a/include/asm-alpha/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 22de3b434a22..22de3b434a22 100644
--- a/include/asm-alpha/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
diff --git a/include/asm-alpha/ucontext.h b/arch/alpha/include/asm/ucontext.h
index 47578ab42152..47578ab42152 100644
--- a/include/asm-alpha/ucontext.h
+++ b/arch/alpha/include/asm/ucontext.h
diff --git a/include/asm-alpha/unaligned.h b/arch/alpha/include/asm/unaligned.h
index 3787c60aed3f..3787c60aed3f 100644
--- a/include/asm-alpha/unaligned.h
+++ b/arch/alpha/include/asm/unaligned.h
diff --git a/include/asm-alpha/unistd.h b/arch/alpha/include/asm/unistd.h
index 5b5c17485942..5b5c17485942 100644
--- a/include/asm-alpha/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
diff --git a/include/asm-alpha/user.h b/arch/alpha/include/asm/user.h
index a4eb6a4ca8d1..a4eb6a4ca8d1 100644
--- a/include/asm-alpha/user.h
+++ b/arch/alpha/include/asm/user.h
diff --git a/include/asm-alpha/vga.h b/arch/alpha/include/asm/vga.h
index c00106bac521..c00106bac521 100644
--- a/include/asm-alpha/vga.h
+++ b/arch/alpha/include/asm/vga.h
diff --git a/include/asm-alpha/xor.h b/arch/alpha/include/asm/xor.h
index 5ee1c2bc0499..5ee1c2bc0499 100644
--- a/include/asm-alpha/xor.h
+++ b/arch/alpha/include/asm/xor.h
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index b15f927a5926..ab204db594d3 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,2 +1,3 @@
-piggy.gz
font.c
+piggy.gz
+vmlinux.lds
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 69130f365904..aecc6c3f908f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -246,9 +246,9 @@ map_single(struct device *dev, void *ptr, size_t size,
}
dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
+ "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
if ((dir == DMA_TO_DEVICE) ||
(dir == DMA_BIDIRECTIONAL)) {
@@ -292,9 +292,9 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
BUG_ON(buf->size != size);
dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
+ "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
DO_STATS ( device_info->bounce_count++ );
@@ -321,9 +321,8 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
}
}
-static inline void
-sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+static int sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
struct safe_buffer *buf = NULL;
@@ -355,9 +354,9 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
*/
dev_dbg(dev,
- "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
- __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
- buf->safe, (void *) buf->safe_dma_addr);
+ "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
DO_STATS ( device_info->bounce_count++ );
@@ -383,8 +382,9 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
* No need to sync the safe buffer - it was allocated
* via the coherent allocators.
*/
+ return 0;
} else {
- dma_cache_maint(dma_to_virt(dev, dma_addr), size, dir);
+ return 1;
}
}
@@ -474,25 +474,29 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
}
}
-void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, (void *) dma_addr, size, dir);
+ dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
+ __func__, dma_addr, offset, size, dir);
- sync_single(dev, dma_addr, size, dir);
+ if (sync_single(dev, dma_addr, offset + size, dir))
+ dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
}
+EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, (void *) dma_addr, size, dir);
+ dev_dbg(dev, "%s(dma=%#x,off=%#lx,size=%zx,dir=%x)\n",
+ __func__, dma_addr, offset, size, dir);
- sync_single(dev, dma_addr, size, dir);
+ if (sync_single(dev, dma_addr, offset + size, dir))
+ dma_cache_maint(dma_to_virt(dev, dma_addr) + offset, size, dir);
}
+EXPORT_SYMBOL(dma_sync_single_range_for_device);
void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
@@ -644,8 +648,6 @@ EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-EXPORT_SYMBOL(dma_sync_single_for_device);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 9578b5d9f9c7..1464ffe71717 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -757,7 +757,14 @@ CONFIG_INPUT_EVDEV=y
#
# Input Device Drivers
#
-# CONFIG_INPUT_KEYBOARD is not set
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -1111,11 +1118,11 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_RS5C372=y
# CONFIG_RTC_DRV_ISL1208 is not set
# CONFIG_RTC_DRV_X1205 is not set
-# CONFIG_RTC_DRV_PCF8563 is not set
+CONFIG_RTC_DRV_PCF8563=y
# CONFIG_RTC_DRV_PCF8583 is not set
CONFIG_RTC_DRV_M41T80=y
# CONFIG_RTC_DRV_M41T80_WDT is not set
-# CONFIG_RTC_DRV_S35390A is not set
+CONFIG_RTC_DRV_S35390A=y
#
# SPI RTC drivers
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 45329fca1b64..7b95d2058395 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -3,11 +3,48 @@
#ifdef __KERNEL__
-#include <linux/mm.h> /* need struct page */
-
+#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <asm-generic/dma-coherent.h>
+#include <asm/memory.h>
+
+/*
+ * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
+ * used internally by the DMA-mapping API to provide DMA addresses. They
+ * must not be used by drivers.
+ */
+#ifndef __arch_page_to_dma
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+ return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+ return (void *)__bus_to_virt(addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+ return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
+}
+#else
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+ return __arch_page_to_dma(dev, page);
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+ return __arch_dma_to_virt(dev, addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+ return __arch_virt_to_dma(dev, addr);
+}
+#endif
/*
* DMA-consistent mapping functions. These allocate/free a region of
@@ -169,7 +206,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
if (!arch_is_coherent())
dma_cache_maint(cpu_addr, size, dir);
- return virt_to_dma(dev, (unsigned long)cpu_addr);
+ return virt_to_dma(dev, cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
@@ -195,7 +232,7 @@ dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
- return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+ return dma_map_single(dev, page_address(page) + offset, size, dir);
}
/**
@@ -241,7 +278,7 @@ static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
- dma_unmap_single(dev, handle, size, (int)dir);
+ dma_unmap_single(dev, handle, size, dir);
}
/**
@@ -314,11 +351,12 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
/**
- * dma_sync_single_for_cpu
+ * dma_sync_single_range_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @offset: offset of region to start sync
+ * @size: size of region to sync
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
*
* Make physical memory consistent for a single streaming mode DMA
* translation after a transfer.
@@ -332,25 +370,41 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
*/
#ifndef CONFIG_DMABOUNCE
static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
if (!arch_is_coherent())
- dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+ dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
if (!arch_is_coherent())
- dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+ dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
#else
-extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
-extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
+extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
+extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
#endif
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+}
+
/**
* dma_sync_sg_for_cpu
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index c8986bb99ed5..df15a0dc228e 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -10,7 +10,7 @@
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
-#define KEXEC_CONTROL_CODE_SIZE 4096
+#define KEXEC_CONTROL_PAGE_SIZE 4096
#define KEXEC_ARCH KEXEC_ARCH_ARM
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 1e070a2b561a..bf7c737c9226 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -150,6 +150,14 @@
#endif
/*
+ * Amount of memory reserved for the vmalloc() area, and minimum
+ * address for vmalloc mappings.
+ */
+extern unsigned long vmalloc_reserve;
+
+#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
+
+/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
@@ -306,20 +314,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/*
- * Optional device DMA address remapping. Do _not_ use directly!
- * We should really eliminate virt_to_bus() here - it's deprecated.
- */
-#ifndef __arch_page_to_dma
-#define page_to_dma(dev, page) ((dma_addr_t)__virt_to_bus((unsigned long)page_address(page)))
-#define dma_to_virt(dev, addr) ((void *)__bus_to_virt(addr))
-#define virt_to_dma(dev, addr) ((dma_addr_t)__virt_to_bus((unsigned long)(addr)))
-#else
-#define page_to_dma(dev, page) (__arch_page_to_dma(dev, page))
-#define dma_to_virt(dev, addr) (__arch_dma_to_virt(dev, addr))
-#define virt_to_dma(dev, addr) (__arch_virt_to_dma(dev, addr))
-#endif
-
-/*
* Optional coherency support. Currently used only by selected
* Intel XSC3-based systems.
*/
diff --git a/arch/arm/include/asm/mtd-xip.h b/arch/arm/include/asm/mtd-xip.h
index 4225372a26f3..d8fbe2d9b8b9 100644
--- a/arch/arm/include/asm/mtd-xip.h
+++ b/arch/arm/include/asm/mtd-xip.h
@@ -10,8 +10,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * $Id: xip.h,v 1.2 2004/12/01 15:49:10 nico Exp $
*/
#ifndef __ARM_MTD_XIP_H__
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index b01d5e7e3d5a..517a4d6ffc74 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -112,9 +112,9 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
static inline void prefetch(const void *ptr)
{
__asm__ __volatile__(
- "pld\t%0"
+ "pld\t%a0"
:
- : "o" (*(char *)ptr)
+ : "p" (ptr)
: "cc");
}
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 0d0d40f1b599..b543a054a17e 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -54,6 +54,7 @@
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
* fr - Feroceon (v4wbi with non-outer-cacheable page table walks)
* v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
+ * v7wbi - identical to v6wbi
*/
#undef _TLB
#undef MULTI_TLB
@@ -266,14 +267,16 @@ extern struct cpu_tlb_fns cpu_tlb;
v4wbi_possible_flags | \
fr_possible_flags | \
v4wb_possible_flags | \
- v6wbi_possible_flags)
+ v6wbi_possible_flags | \
+ v7wbi_possible_flags)
#define always_tlb_flags (v3_always_flags & \
v4_always_flags & \
v4wbi_always_flags & \
fr_always_flags & \
v4wb_always_flags & \
- v6wbi_always_flags)
+ v6wbi_always_flags & \
+ v7wbi_always_flags)
#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index f95fbb2fcb5f..010618487cf1 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -381,6 +381,12 @@
#define __NR_fallocate (__NR_SYSCALL_BASE+352)
#define __NR_timerfd_settime (__NR_SYSCALL_BASE+353)
#define __NR_timerfd_gettime (__NR_SYSCALL_BASE+354)
+#define __NR_signalfd4 (__NR_SYSCALL_BASE+355)
+#define __NR_eventfd2 (__NR_SYSCALL_BASE+356)
+#define __NR_epoll_create1 (__NR_SYSCALL_BASE+357)
+#define __NR_dup3 (__NR_SYSCALL_BASE+358)
+#define __NR_pipe2 (__NR_SYSCALL_BASE+359)
+#define __NR_inotify_init1 (__NR_SYSCALL_BASE+360)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/.gitignore b/arch/arm/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/arm/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 30a67a5a40a8..09a061cb7838 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -262,10 +262,10 @@
/* 250 */ CALL(sys_epoll_create)
CALL(ABI(sys_epoll_ctl, sys_oabi_epoll_ctl))
CALL(ABI(sys_epoll_wait, sys_oabi_epoll_wait))
- CALL(sys_remap_file_pages)
+ CALL(sys_remap_file_pages)
CALL(sys_ni_syscall) /* sys_set_thread_area */
/* 255 */ CALL(sys_ni_syscall) /* sys_get_thread_area */
- CALL(sys_set_tid_address)
+ CALL(sys_set_tid_address)
CALL(sys_timer_create)
CALL(sys_timer_settime)
CALL(sys_timer_gettime)
@@ -364,6 +364,12 @@
CALL(sys_fallocate)
CALL(sys_timerfd_settime)
CALL(sys_timerfd_gettime)
+/* 355 */ CALL(sys_signalfd4)
+ CALL(sys_eventfd2)
+ CALL(sys_epoll_create1)
+ CALL(sys_dup3)
+ CALL(sys_pipe2)
+/* 360 */ CALL(sys_inotify_init1)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index db8f54a3451f..fae5beb3c3d6 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -71,7 +71,7 @@ void machine_kexec(struct kimage *image)
flush_icache_range((unsigned long) reboot_code_buffer,
- (unsigned long) reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
+ (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
cpu_proc_fin();
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 38f0e7940a13..2ca7038b67a7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,6 +81,8 @@ EXPORT_SYMBOL(system_serial_high);
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);
+unsigned long __initdata vmalloc_reserve = 128 << 20;
+
#ifdef MULTI_CPU
struct processor processor;
@@ -501,6 +503,17 @@ static void __init early_mem(char **p)
__early_param("mem=", early_mem);
/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+ vmalloc_reserve = memparse(*arg, arg);
+}
+__early_param("vmalloc=", early_vmalloc);
+
+/*
* Initial parsing of the command line.
*/
static void __init parse_cmdline(char **cmdline_p, char *from)
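The new early_vmalloc() handler above is parsed with memparse(), which accepts the usual K/M/G suffixes. A purely illustrative boot command line (console and root device are hypothetical) that grows the vmalloc area from the default 128 MiB to 256 MiB would look like:

    console=ttyS0,115200 root=/dev/sda1 vmalloc=256M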
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 7277aef83098..872f1f8fbb57 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -288,14 +288,28 @@ void unregister_undef_hook(struct undef_hook *hook)
spin_unlock_irqrestore(&undef_lock, flags);
}
+static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+{
+ struct undef_hook *hook;
+ unsigned long flags;
+ int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
+
+ spin_lock_irqsave(&undef_lock, flags);
+ list_for_each_entry(hook, &undef_hook, node)
+ if ((instr & hook->instr_mask) == hook->instr_val &&
+ (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
+ fn = hook->fn;
+ spin_unlock_irqrestore(&undef_lock, flags);
+
+ return fn ? fn(regs, instr) : 1;
+}
+
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
unsigned int correction = thumb_mode(regs) ? 2 : 4;
unsigned int instr;
- struct undef_hook *hook;
siginfo_t info;
void __user *pc;
- unsigned long flags;
/*
* According to the ARM ARM, PC is 2 or 4 bytes ahead,
@@ -325,17 +339,8 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
}
#endif
- spin_lock_irqsave(&undef_lock, flags);
- list_for_each_entry(hook, &undef_hook, node) {
- if ((instr & hook->instr_mask) == hook->instr_val &&
- (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
- if (hook->fn(regs, instr) == 0) {
- spin_unlock_irqrestore(&undef_lock, flags);
- return;
- }
- }
- }
- spin_unlock_irqrestore(&undef_lock, flags);
+ if (call_undef_hook(regs, instr) == 0)
+ return;
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c
index 35eb232a649a..ae3e1c8c7583 100644
--- a/arch/arm/mach-footbridge/cats-pci.c
+++ b/arch/arm/mach-footbridge/cats-pci.c
@@ -18,6 +18,9 @@ static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
static int __init cats_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
+ if (dev->irq >= 255)
+ return -1; /* not a valid interrupt. */
+
if (dev->irq >= 128)
return dev->irq & 0x1f;
diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c
index ce5ea7c26675..7c49d55e6b27 100644
--- a/arch/arm/mach-integrator/cpu.c
+++ b/arch/arm/mach-integrator/cpu.c
@@ -3,8 +3,6 @@
*
* Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
*
- * $Id: cpu.c,v 1.6 2002/07/18 13:58:51 rmk Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h
index 83c4c1ceb411..028b87839c0f 100644
--- a/arch/arm/mach-integrator/include/mach/platform.h
+++ b/arch/arm/mach-integrator/include/mach/platform.h
@@ -26,8 +26,6 @@
* NOTE: This is a multi-hosted header file for use with uHAL and
* supported debuggers.
*
- * $Id: platform.s,v 1.32 2000/02/18 10:51:39 asims Exp $
- *
* ***********************************************************************/
#ifndef __address_h
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 0e509b8ad56e..189f16f3619d 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -15,15 +15,17 @@
#include <linux/mbus.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
+#include <linux/spi/orion_spi.h>
#include <asm/page.h>
#include <asm/timex.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <mach/kirkwood.h>
-#include <asm/plat-orion/cache-feroceon-l2.h>
-#include <asm/plat-orion/ehci-orion.h>
-#include <asm/plat-orion/orion_nand.h>
-#include <asm/plat-orion/time.h>
+#include <plat/cache-feroceon-l2.h>
+#include <plat/ehci-orion.h>
+#include <plat/mv_xor.h>
+#include <plat/orion_nand.h>
+#include <plat/time.h>
#include "common.h"
/*****************************************************************************
@@ -196,6 +198,37 @@ void __init kirkwood_sata_init(struct mv_sata_platform_data *sata_data)
/*****************************************************************************
+ * SPI
+ ****************************************************************************/
+static struct orion_spi_info kirkwood_spi_plat_data = {
+ .tclk = KIRKWOOD_TCLK,
+};
+
+static struct resource kirkwood_spi_resources[] = {
+ {
+ .start = SPI_PHYS_BASE,
+ .end = SPI_PHYS_BASE + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device kirkwood_spi = {
+ .name = "orion_spi",
+ .id = 0,
+ .resource = kirkwood_spi_resources,
+ .dev = {
+ .platform_data = &kirkwood_spi_plat_data,
+ },
+ .num_resources = ARRAY_SIZE(kirkwood_spi_resources),
+};
+
+void __init kirkwood_spi_init(void)
+{
+ platform_device_register(&kirkwood_spi);
+}
+
+
+/*****************************************************************************
* UART0
****************************************************************************/
static struct plat_serial8250_port kirkwood_uart0_data[] = {
@@ -284,6 +317,212 @@ void __init kirkwood_uart1_init(void)
/*****************************************************************************
+ * XOR
+ ****************************************************************************/
+static struct mv_xor_platform_shared_data kirkwood_xor_shared_data = {
+ .dram = &kirkwood_mbus_dram_info,
+};
+
+static u64 kirkwood_xor_dmamask = DMA_32BIT_MASK;
+
+
+/*****************************************************************************
+ * XOR0
+ ****************************************************************************/
+static struct resource kirkwood_xor0_shared_resources[] = {
+ {
+ .name = "xor 0 low",
+ .start = XOR0_PHYS_BASE,
+ .end = XOR0_PHYS_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "xor 0 high",
+ .start = XOR0_HIGH_PHYS_BASE,
+ .end = XOR0_HIGH_PHYS_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device kirkwood_xor0_shared = {
+ .name = MV_XOR_SHARED_NAME,
+ .id = 0,
+ .dev = {
+ .platform_data = &kirkwood_xor_shared_data,
+ },
+ .num_resources = ARRAY_SIZE(kirkwood_xor0_shared_resources),
+ .resource = kirkwood_xor0_shared_resources,
+};
+
+static struct resource kirkwood_xor00_resources[] = {
+ [0] = {
+ .start = IRQ_KIRKWOOD_XOR_00,
+ .end = IRQ_KIRKWOOD_XOR_00,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mv_xor_platform_data kirkwood_xor00_data = {
+ .shared = &kirkwood_xor0_shared,
+ .hw_id = 0,
+ .pool_size = PAGE_SIZE,
+};
+
+static struct platform_device kirkwood_xor00_channel = {
+ .name = MV_XOR_NAME,
+ .id = 0,
+ .num_resources = ARRAY_SIZE(kirkwood_xor00_resources),
+ .resource = kirkwood_xor00_resources,
+ .dev = {
+ .dma_mask = &kirkwood_xor_dmamask,
+ .coherent_dma_mask = DMA_64BIT_MASK,
+ .platform_data = (void *)&kirkwood_xor00_data,
+ },
+};
+
+static struct resource kirkwood_xor01_resources[] = {
+ [0] = {
+ .start = IRQ_KIRKWOOD_XOR_01,
+ .end = IRQ_KIRKWOOD_XOR_01,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mv_xor_platform_data kirkwood_xor01_data = {
+ .shared = &kirkwood_xor0_shared,
+ .hw_id = 1,
+ .pool_size = PAGE_SIZE,
+};
+
+static struct platform_device kirkwood_xor01_channel = {
+ .name = MV_XOR_NAME,
+ .id = 1,
+ .num_resources = ARRAY_SIZE(kirkwood_xor01_resources),
+ .resource = kirkwood_xor01_resources,
+ .dev = {
+ .dma_mask = &kirkwood_xor_dmamask,
+ .coherent_dma_mask = DMA_64BIT_MASK,
+ .platform_data = (void *)&kirkwood_xor01_data,
+ },
+};
+
+void __init kirkwood_xor0_init(void)
+{
+ platform_device_register(&kirkwood_xor0_shared);
+
+ /*
+ * The two engines can't do memset simultaneously; this limitation
+ * is handled by removing memset support from one of the engines.
+ */
+ dma_cap_set(DMA_MEMCPY, kirkwood_xor00_data.cap_mask);
+ dma_cap_set(DMA_XOR, kirkwood_xor00_data.cap_mask);
+ platform_device_register(&kirkwood_xor00_channel);
+
+ dma_cap_set(DMA_MEMCPY, kirkwood_xor01_data.cap_mask);
+ dma_cap_set(DMA_MEMSET, kirkwood_xor01_data.cap_mask);
+ dma_cap_set(DMA_XOR, kirkwood_xor01_data.cap_mask);
+ platform_device_register(&kirkwood_xor01_channel);
+}
+
+
+/*****************************************************************************
+ * XOR1
+ ****************************************************************************/
+static struct resource kirkwood_xor1_shared_resources[] = {
+ {
+ .name = "xor 1 low",
+ .start = XOR1_PHYS_BASE,
+ .end = XOR1_PHYS_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "xor 1 high",
+ .start = XOR1_HIGH_PHYS_BASE,
+ .end = XOR1_HIGH_PHYS_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device kirkwood_xor1_shared = {
+ .name = MV_XOR_SHARED_NAME,
+ .id = 1,
+ .dev = {
+ .platform_data = &kirkwood_xor_shared_data,
+ },
+ .num_resources = ARRAY_SIZE(kirkwood_xor1_shared_resources),
+ .resource = kirkwood_xor1_shared_resources,
+};
+
+static struct resource kirkwood_xor10_resources[] = {
+ [0] = {
+ .start = IRQ_KIRKWOOD_XOR_10,
+ .end = IRQ_KIRKWOOD_XOR_10,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mv_xor_platform_data kirkwood_xor10_data = {
+ .shared = &kirkwood_xor1_shared,
+ .hw_id = 0,
+ .pool_size = PAGE_SIZE,
+};
+
+static struct platform_device kirkwood_xor10_channel = {
+ .name = MV_XOR_NAME,
+ .id = 2,
+ .num_resources = ARRAY_SIZE(kirkwood_xor10_resources),
+ .resource = kirkwood_xor10_resources,
+ .dev = {
+ .dma_mask = &kirkwood_xor_dmamask,
+ .coherent_dma_mask = DMA_64BIT_MASK,
+ .platform_data = (void *)&kirkwood_xor10_data,
+ },
+};
+
+static struct resource kirkwood_xor11_resources[] = {
+ [0] = {
+ .start = IRQ_KIRKWOOD_XOR_11,
+ .end = IRQ_KIRKWOOD_XOR_11,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mv_xor_platform_data kirkwood_xor11_data = {
+ .shared = &kirkwood_xor1_shared,
+ .hw_id = 1,
+ .pool_size = PAGE_SIZE,
+};
+
+static struct platform_device kirkwood_xor11_channel = {
+ .name = MV_XOR_NAME,
+ .id = 3,
+ .num_resources = ARRAY_SIZE(kirkwood_xor11_resources),
+ .resource = kirkwood_xor11_resources,
+ .dev = {
+ .dma_mask = &kirkwood_xor_dmamask,
+ .coherent_dma_mask = DMA_64BIT_MASK,
+ .platform_data = (void *)&kirkwood_xor11_data,
+ },
+};
+
+void __init kirkwood_xor1_init(void)
+{
+ platform_device_register(&kirkwood_xor1_shared);
+
+ /*
+ * The two engines can't do memset simultaneously; this limitation
+ * is handled by removing memset support from one of the engines.
+ */
+ dma_cap_set(DMA_MEMCPY, kirkwood_xor10_data.cap_mask);
+ dma_cap_set(DMA_XOR, kirkwood_xor10_data.cap_mask);
+ platform_device_register(&kirkwood_xor10_channel);
+
+ dma_cap_set(DMA_MEMCPY, kirkwood_xor11_data.cap_mask);
+ dma_cap_set(DMA_MEMSET, kirkwood_xor11_data.cap_mask);
+ dma_cap_set(DMA_XOR, kirkwood_xor11_data.cap_mask);
+ platform_device_register(&kirkwood_xor11_channel);
+}
+
+
+/*****************************************************************************
* Time handling
****************************************************************************/
static void kirkwood_timer_init(void)
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index 5dee2f6b40a5..69cd113af03a 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -33,8 +33,11 @@ void kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data);
void kirkwood_pcie_init(void);
void kirkwood_rtc_init(void);
void kirkwood_sata_init(struct mv_sata_platform_data *sata_data);
+void kirkwood_spi_init(void);
void kirkwood_uart0_init(void);
void kirkwood_uart1_init(void);
+void kirkwood_xor0_init(void);
+void kirkwood_xor1_init(void);
extern struct sys_timer kirkwood_timer;
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index d1336b41f0fb..5c69992295e8 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -88,6 +88,15 @@
#define USB_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x50000)
+#define XOR0_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x60800)
+#define XOR0_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x60800)
+#define XOR1_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x60900)
+#define XOR1_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x60900)
+#define XOR0_HIGH_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x60A00)
+#define XOR0_HIGH_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x60A00)
+#define XOR1_HIGH_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x60B00)
+#define XOR1_HIGH_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x60B00)
+
#define GE00_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x70000)
#define GE01_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x74000)
diff --git a/arch/arm/mach-kirkwood/irq.c b/arch/arm/mach-kirkwood/irq.c
index 302bb2cf6669..5790643ffe07 100644
--- a/arch/arm/mach-kirkwood/irq.c
+++ b/arch/arm/mach-kirkwood/irq.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <asm/plat-orion/irq.h>
+#include <plat/irq.h>
#include "common.h"
void __init kirkwood_init_irq(void)
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 8282d0ff84bf..2195fa31f6b7 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -12,7 +12,7 @@
#include <linux/pci.h>
#include <linux/mbus.h>
#include <asm/mach/pci.h>
-#include <asm/plat-orion/pcie.h>
+#include <plat/pcie.h>
#include "common.h"
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index 182230a5d198..a3012d445971 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -18,6 +18,9 @@
#include <linux/timer.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/orion_spi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
@@ -34,6 +37,21 @@ static struct mv_sata_platform_data rd88f6192_sata_data = {
.n_ports = 2,
};
+static const struct flash_platform_data rd88F6192_spi_slave_data = {
+ .type = "m25p128",
+};
+
+static struct spi_board_info __initdata rd88F6192_spi_slave_info[] = {
+ {
+ .modalias = "m25p80",
+ .platform_data = &rd88F6192_spi_slave_data,
+ .irq = -1,
+ .max_speed_hz = 20000000,
+ .bus_num = 0,
+ .chip_select = 0,
+ },
+};
+
static void __init rd88f6192_init(void)
{
/*
@@ -45,7 +63,12 @@ static void __init rd88f6192_init(void)
kirkwood_ge00_init(&rd88f6192_ge00_data);
kirkwood_rtc_init();
kirkwood_sata_init(&rd88f6192_sata_data);
+ spi_register_board_info(rd88F6192_spi_slave_info,
+ ARRAY_SIZE(rd88F6192_spi_slave_info));
+ kirkwood_spi_init();
kirkwood_uart0_init();
+ kirkwood_xor0_init();
+ kirkwood_xor1_init();
}
static int __init rd88f6192_pci_init(void)
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index d8a43018c7d3..d96487a0f18b 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -23,7 +23,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/kirkwood.h>
-#include <asm/plat-orion/orion_nand.h>
+#include <plat/orion_nand.h>
#include "common.h"
static struct mtd_partition rd88f6281_nand_parts[] = {
diff --git a/arch/arm/mach-lh7a40x/include/mach/ssp.h b/arch/arm/mach-lh7a40x/include/mach/ssp.h
index 132b1c4d5ce6..509916182e34 100644
--- a/arch/arm/mach-lh7a40x/include/mach/ssp.h
+++ b/arch/arm/mach-lh7a40x/include/mach/ssp.h
@@ -1,5 +1,4 @@
/* ssp.h
- $Id$
written by Marc Singer
6 Dec 2004
diff --git a/arch/arm/mach-lh7a40x/lcd-panel.h b/arch/arm/mach-lh7a40x/lcd-panel.h
index df6e38ed425b..a7f5027b2f78 100644
--- a/arch/arm/mach-lh7a40x/lcd-panel.h
+++ b/arch/arm/mach-lh7a40x/lcd-panel.h
@@ -1,5 +1,4 @@
/* lcd-panel.h
- $Id$
written by Marc Singer
18 Jul 2005
diff --git a/arch/arm/mach-loki/common.c b/arch/arm/mach-loki/common.c
index e20cdbca1ebe..c0d2d9d12e74 100644
--- a/arch/arm/mach-loki/common.c
+++ b/arch/arm/mach-loki/common.c
@@ -19,8 +19,8 @@
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <mach/loki.h>
-#include <asm/plat-orion/orion_nand.h>
-#include <asm/plat-orion/time.h>
+#include <plat/orion_nand.h>
+#include <plat/time.h>
#include "common.h"
/*****************************************************************************
diff --git a/arch/arm/mach-loki/irq.c b/arch/arm/mach-loki/irq.c
index d839af91fe03..5a487930cb2f 100644
--- a/arch/arm/mach-loki/irq.c
+++ b/arch/arm/mach-loki/irq.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/io.h>
-#include <asm/plat-orion/irq.h>
+#include <plat/irq.h>
#include "common.h"
void __init loki_init_irq(void)
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index e633f9cb239f..953a26c469cb 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -18,10 +18,10 @@
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <mach/mv78xx0.h>
-#include <asm/plat-orion/cache-feroceon-l2.h>
-#include <asm/plat-orion/ehci-orion.h>
-#include <asm/plat-orion/orion_nand.h>
-#include <asm/plat-orion/time.h>
+#include <plat/cache-feroceon-l2.h>
+#include <plat/ehci-orion.h>
+#include <plat/orion_nand.h>
+#include <plat/time.h>
#include "common.h"
diff --git a/arch/arm/mach-mv78xx0/irq.c b/arch/arm/mach-mv78xx0/irq.c
index 3198abf54c90..28248d37b999 100644
--- a/arch/arm/mach-mv78xx0/irq.c
+++ b/arch/arm/mach-mv78xx0/irq.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <mach/mv78xx0.h>
-#include <asm/plat-orion/irq.h>
+#include <plat/irq.h>
#include "common.h"
void __init mv78xx0_init_irq(void)
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index b78e1443159f..430ea84d587d 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -12,7 +12,7 @@
#include <linux/pci.h>
#include <linux/mbus.h>
#include <asm/mach/pci.h>
-#include <asm/plat-orion/pcie.h>
+#include <plat/pcie.h>
#include "common.h"
struct pcie_port {
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index 1607c941d95f..10ef464d6be7 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -317,7 +317,6 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
printk(error, 6, status);
return -ENODEV;
}
- data->multipoint = 1;
tusb_device.dev.platform_data = data;
/* REVISIT let the driver know what DMA channels work */
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 168eeacaa4c0..7b11e552bc5a 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -26,9 +26,10 @@
#include <asm/mach/time.h>
#include <mach/hardware.h>
#include <mach/orion5x.h>
-#include <asm/plat-orion/ehci-orion.h>
-#include <asm/plat-orion/orion_nand.h>
-#include <asm/plat-orion/time.h>
+#include <plat/ehci-orion.h>
+#include <plat/mv_xor.h>
+#include <plat/orion_nand.h>
+#include <plat/time.h>
#include "common.h"
/*****************************************************************************
@@ -355,6 +356,103 @@ void __init orion5x_uart1_init(void)
/*****************************************************************************
+ * XOR engine
+ ****************************************************************************/
+static struct resource orion5x_xor_shared_resources[] = {
+ {
+ .name = "xor low",
+ .start = ORION5X_XOR_PHYS_BASE,
+ .end = ORION5X_XOR_PHYS_BASE + 0xff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "xor high",
+ .start = ORION5X_XOR_PHYS_BASE + 0x200,
+ .end = ORION5X_XOR_PHYS_BASE + 0x2ff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device orion5x_xor_shared = {
+ .name = MV_XOR_SHARED_NAME,
+ .id = 0,
+ .num_resources = ARRAY_SIZE(orion5x_xor_shared_resources),
+ .resource = orion5x_xor_shared_resources,
+};
+
+static u64 orion5x_xor_dmamask = DMA_32BIT_MASK;
+
+static struct resource orion5x_xor0_resources[] = {
+ [0] = {
+ .start = IRQ_ORION5X_XOR0,
+ .end = IRQ_ORION5X_XOR0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mv_xor_platform_data orion5x_xor0_data = {
+ .shared = &orion5x_xor_shared,
+ .hw_id = 0,
+ .pool_size = PAGE_SIZE,
+};
+
+static struct platform_device orion5x_xor0_channel = {
+ .name = MV_XOR_NAME,
+ .id = 0,
+ .num_resources = ARRAY_SIZE(orion5x_xor0_resources),
+ .resource = orion5x_xor0_resources,
+ .dev = {
+ .dma_mask = &orion5x_xor_dmamask,
+ .coherent_dma_mask = DMA_64BIT_MASK,
+ .platform_data = (void *)&orion5x_xor0_data,
+ },
+};
+
+static struct resource orion5x_xor1_resources[] = {
+ [0] = {
+ .start = IRQ_ORION5X_XOR1,
+ .end = IRQ_ORION5X_XOR1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mv_xor_platform_data orion5x_xor1_data = {
+ .shared = &orion5x_xor_shared,
+ .hw_id = 1,
+ .pool_size = PAGE_SIZE,
+};
+
+static struct platform_device orion5x_xor1_channel = {
+ .name = MV_XOR_NAME,
+ .id = 1,
+ .num_resources = ARRAY_SIZE(orion5x_xor1_resources),
+ .resource = orion5x_xor1_resources,
+ .dev = {
+ .dma_mask = &orion5x_xor_dmamask,
+ .coherent_dma_mask = DMA_64BIT_MASK,
+ .platform_data = (void *)&orion5x_xor1_data,
+ },
+};
+
+void __init orion5x_xor_init(void)
+{
+ platform_device_register(&orion5x_xor_shared);
+
+ /*
+ * The two engines can't do memset simultaneously; this limitation
+ * is handled by removing memset support from one of the engines.
+ */
+ dma_cap_set(DMA_MEMCPY, orion5x_xor0_data.cap_mask);
+ dma_cap_set(DMA_XOR, orion5x_xor0_data.cap_mask);
+ platform_device_register(&orion5x_xor0_channel);
+
+ dma_cap_set(DMA_MEMCPY, orion5x_xor1_data.cap_mask);
+ dma_cap_set(DMA_MEMSET, orion5x_xor1_data.cap_mask);
+ dma_cap_set(DMA_XOR, orion5x_xor1_data.cap_mask);
+ platform_device_register(&orion5x_xor1_channel);
+}
+
+
+/*****************************************************************************
* Time handling
****************************************************************************/
static void orion5x_timer_init(void)
@@ -382,6 +480,8 @@ static void __init orion5x_id(u32 *dev, u32 *rev, char **dev_name)
*dev_name = "MV88F5281-D2";
} else if (*rev == MV88F5281_REV_D1) {
*dev_name = "MV88F5281-D1";
+ } else if (*rev == MV88F5281_REV_D0) {
+ *dev_name = "MV88F5281-D0";
} else {
*dev_name = "MV88F5281-Rev-Unsupported";
}
@@ -416,6 +516,15 @@ void __init orion5x_init(void)
* Setup Orion address map
*/
orion5x_setup_cpu_mbus_bridge();
+
+ /*
+ * Don't issue "Wait for Interrupt" instruction if we are
+ * running on D0 5281 silicon.
+ */
+ if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
+ printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
+ disable_hlt();
+ }
}
/*
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index f72cf0e77544..e75bd7004b94 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -32,6 +32,7 @@ void orion5x_i2c_init(void);
void orion5x_sata_init(struct mv_sata_platform_data *sata_data);
void orion5x_uart0_init(void);
void orion5x_uart1_init(void);
+void orion5x_xor_init(void);
/*
* PCIe/PCI functions.
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index 48ce6d0e0020..ff13e9060b18 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -25,7 +25,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/orion5x.h>
-#include <asm/plat-orion/orion_nand.h>
+#include <plat/orion_nand.h>
#include "common.h"
#include "mpp.h"
diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h
index f52a7d65bec2..61eb74a88862 100644
--- a/arch/arm/mach-orion5x/include/mach/orion5x.h
+++ b/arch/arm/mach-orion5x/include/mach/orion5x.h
@@ -73,6 +73,7 @@
#define MV88F5182_REV_A2 2
/* Orion-2 (88F5281) */
#define MV88F5281_DEV_ID 0x5281
+#define MV88F5281_REV_D0 4
#define MV88F5281_REV_D1 5
#define MV88F5281_REV_D2 6
@@ -105,6 +106,10 @@
#define ORION5X_USB0_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x50000)
#define ORION5X_USB0_REG(x) (ORION5X_USB0_VIRT_BASE | (x))
+#define ORION5X_XOR_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x60900)
+#define ORION5X_XOR_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x60900)
+#define ORION5X_XOR_REG(x) (ORION5X_XOR_VIRT_BASE | (x))
+
#define ORION5X_ETH_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x70000)
#define ORION5X_ETH_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x70000)
#define ORION5X_ETH_REG(x) (ORION5X_ETH_VIRT_BASE | (x))
diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c
index cc2a017fd2a9..2545ff9e5830 100644
--- a/arch/arm/mach-orion5x/irq.c
+++ b/arch/arm/mach-orion5x/irq.c
@@ -16,7 +16,7 @@
#include <asm/gpio.h>
#include <asm/io.h>
#include <mach/orion5x.h>
-#include <asm/plat-orion/irq.h>
+#include <plat/irq.h>
#include "common.h"
/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index 0caaaac74bc1..cb72f1bb9cb7 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -25,7 +25,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/orion5x.h>
-#include <asm/plat-orion/orion_nand.h>
+#include <plat/orion_nand.h>
#include "common.h"
#include "mpp.h"
@@ -356,6 +356,7 @@ static void __init kurobox_pro_init(void)
orion5x_sata_init(&kurobox_pro_sata_data);
orion5x_uart0_init();
orion5x_uart1_init();
+ orion5x_xor_init();
orion5x_setup_dev_boot_win(KUROBOX_PRO_NOR_BOOT_BASE,
KUROBOX_PRO_NOR_BOOT_SIZE);
diff --git a/arch/arm/mach-orion5x/mss2-setup.c b/arch/arm/mach-orion5x/mss2-setup.c
index 4403cc963d66..53ff1893b883 100644
--- a/arch/arm/mach-orion5x/mss2-setup.c
+++ b/arch/arm/mach-orion5x/mss2-setup.c
@@ -239,6 +239,7 @@ static void __init mss2_init(void)
orion5x_i2c_init();
orion5x_sata_init(&mss2_sata_data);
orion5x_uart0_init();
+ orion5x_xor_init();
orion5x_setup_dev_boot_win(MSS2_NOR_BOOT_BASE, MSS2_NOR_BOOT_SIZE);
platform_device_register(&mss2_nor_flash);
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index 67b2c0df615f..978d4d599396 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -203,6 +203,7 @@ static void __init mv2120_init(void)
orion5x_i2c_init();
orion5x_sata_init(&mv2120_sata_data);
orion5x_uart0_init();
+ orion5x_xor_init();
orion5x_setup_dev_boot_win(MV2120_NOR_BOOT_BASE, MV2120_NOR_BOOT_SIZE);
platform_device_register(&mv2120_nor_flash);
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index 256a4f680935..fbceecc4b7ec 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -14,7 +14,7 @@
#include <linux/pci.h>
#include <linux/mbus.h>
#include <asm/mach/pci.h>
-#include <asm/plat-orion/pcie.h>
+#include <plat/pcie.h>
#include "common.h"
/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c
index 8771cb76f0dc..4c3bcd76ac85 100644
--- a/arch/arm/mach-orion5x/rd88f5182-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c
@@ -292,6 +292,7 @@ static void __init rd88f5182_init(void)
orion5x_i2c_init();
orion5x_sata_init(&rd88f5182_sata_data);
orion5x_uart0_init();
+ orion5x_xor_init();
orion5x_setup_dev_boot_win(RD88F5182_NOR_BOOT_BASE,
RD88F5182_NOR_BOOT_SIZE);
diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c
index 809132de31d2..dd657163cd8d 100644
--- a/arch/arm/mach-orion5x/ts209-setup.c
+++ b/arch/arm/mach-orion5x/ts209-setup.c
@@ -207,12 +207,12 @@ static struct i2c_board_info __initdata qnap_ts209_i2c_rtc = {
static struct gpio_keys_button qnap_ts209_buttons[] = {
{
- .code = KEY_RESTART,
+ .code = KEY_COPY,
.gpio = QNAP_TS209_GPIO_KEY_MEDIA,
.desc = "USB Copy Button",
.active_low = 1,
}, {
- .code = KEY_POWER,
+ .code = KEY_RESTART,
.gpio = QNAP_TS209_GPIO_KEY_RESET,
.desc = "Reset Button",
.active_low = 1,
@@ -296,6 +296,7 @@ static void __init qnap_ts209_init(void)
orion5x_i2c_init();
orion5x_sata_init(&qnap_ts209_sata_data);
orion5x_uart0_init();
+ orion5x_xor_init();
orion5x_setup_dev_boot_win(QNAP_TS209_NOR_BOOT_BASE,
QNAP_TS209_NOR_BOOT_SIZE);
diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c
index 6053e76ac967..b27d2b762081 100644
--- a/arch/arm/mach-orion5x/ts409-setup.c
+++ b/arch/arm/mach-orion5x/ts409-setup.c
@@ -3,6 +3,9 @@
*
* Maintainer: Sylver Bruneau <sylver.bruneau@gmail.com>
*
+ * Copyright (C) 2008 Sylver Bruneau <sylver.bruneau@gmail.com>
+ * Copyright (C) 2008 Martin Michlmayr <tbm@cyrius.com>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -16,6 +19,7 @@
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
+#include <linux/leds.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -162,16 +166,59 @@ static struct i2c_board_info __initdata qnap_ts409_i2c_rtc = {
I2C_BOARD_INFO("s35390a", 0x30),
};
+/*****************************************************************************
+ * LEDs attached to GPIO
+ ****************************************************************************/
+
+static struct gpio_led ts409_led_pins[] = {
+ {
+ .name = "ts409:red:sata1",
+ .gpio = 4,
+ .active_low = 1,
+ }, {
+ .name = "ts409:red:sata2",
+ .gpio = 5,
+ .active_low = 1,
+ }, {
+ .name = "ts409:red:sata3",
+ .gpio = 6,
+ .active_low = 1,
+ }, {
+ .name = "ts409:red:sata4",
+ .gpio = 7,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led_platform_data ts409_led_data = {
+ .leds = ts409_led_pins,
+ .num_leds = ARRAY_SIZE(ts409_led_pins),
+};
+
+static struct platform_device ts409_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &ts409_led_data,
+ },
+};
+
/****************************************************************************
* GPIO Attached Keys
* Power button is attached to the PIC microcontroller
****************************************************************************/
+#define QNAP_TS409_GPIO_KEY_RESET 14
#define QNAP_TS409_GPIO_KEY_MEDIA 15
static struct gpio_keys_button qnap_ts409_buttons[] = {
{
.code = KEY_RESTART,
+ .gpio = QNAP_TS409_GPIO_KEY_RESET,
+ .desc = "Reset Button",
+ .active_low = 1,
+ }, {
+ .code = KEY_COPY,
.gpio = QNAP_TS409_GPIO_KEY_MEDIA,
.desc = "USB Copy Button",
.active_low = 1,
@@ -255,6 +302,7 @@ static void __init qnap_ts409_init(void)
if (qnap_ts409_i2c_rtc.irq == 0)
pr_warning("qnap_ts409_init: failed to get RTC IRQ\n");
i2c_register_board_info(0, &qnap_ts409_i2c_rtc, 1);
+ platform_device_register(&ts409_leds);
/* register tsx09 specific power-off method */
pm_power_off = qnap_tsx09_power_off;
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 014916a28fdc..ae0a5dccd2a1 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -256,6 +256,7 @@ static void __init ts78xx_init(void)
orion5x_sata_init(&ts78xx_sata_data);
orion5x_uart0_init();
orion5x_uart1_init();
+ orion5x_xor_init();
orion5x_setup_dev_boot_win(TS78XX_NOR_BOOT_BASE,
TS78XX_NOR_BOOT_SIZE);
diff --git a/arch/arm/mach-pxa/include/mach/mtd-xip.h b/arch/arm/mach-pxa/include/mach/mtd-xip.h
index 351f32f13ce4..4d452fcb1508 100644
--- a/arch/arm/mach-pxa/include/mach/mtd-xip.h
+++ b/arch/arm/mach-pxa/include/mach/mtd-xip.h
@@ -10,8 +10,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * $Id: xip.h,v 1.2 2004/12/01 15:49:10 nico Exp $
*/
#ifndef __ARCH_PXA_MTD_XIP_H__
diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h
index 8956afe8195e..67debc47e8c6 100644
--- a/arch/arm/mach-pxa/include/mach/poodle.h
+++ b/arch/arm/mach-pxa/include/mach/poodle.h
@@ -70,6 +70,12 @@
#define POODLE_SCOOP_IO_DIR ( POODLE_SCOOP_VPEN | POODLE_SCOOP_HS_OUT )
#define POODLE_SCOOP_IO_OUT ( 0 )
+#define POODLE_LOCOMO_GPIO_AMP_ON LOCOMO_GPIO(8)
+#define POODLE_LOCOMO_GPIO_MUTE_L LOCOMO_GPIO(10)
+#define POODLE_LOCOMO_GPIO_MUTE_R LOCOMO_GPIO(11)
+#define POODLE_LOCOMO_GPIO_232VCC_ON LOCOMO_GPIO(12)
+#define POODLE_LOCOMO_GPIO_JK_B LOCOMO_GPIO(13)
+
extern struct platform_device poodle_locomo_device;
#endif /* __ASM_ARCH_POODLE_H */
diff --git a/arch/arm/mach-pxa/include/mach/pxafb.h b/arch/arm/mach-pxa/include/mach/pxafb.h
index 65447549616f..8e591118371e 100644
--- a/arch/arm/mach-pxa/include/mach/pxafb.h
+++ b/arch/arm/mach-pxa/include/mach/pxafb.h
@@ -28,6 +28,7 @@
* bits 10-17 : for AC Bias Pin Frequency
* bit 18 : for output enable polarity
* bit 19 : for pixel clock edge
+ * bit 20 : for output pixel format when base is RGBT16
*/
#define LCD_CONN_TYPE(_x) ((_x) & 0x0f)
#define LCD_CONN_WIDTH(_x) (((_x) >> 4) & 0x1f)
@@ -53,10 +54,11 @@
#define LCD_SMART_PANEL_18BPP ((18 << 4) | LCD_TYPE_SMART_PANEL)
#define LCD_AC_BIAS_FREQ(x) (((x) & 0xff) << 10)
-#define LCD_BIAS_ACTIVE_HIGH (0 << 17)
-#define LCD_BIAS_ACTIVE_LOW (1 << 17)
-#define LCD_PCLK_EDGE_RISE (0 << 18)
-#define LCD_PCLK_EDGE_FALL (1 << 18)
+#define LCD_BIAS_ACTIVE_HIGH (0 << 18)
+#define LCD_BIAS_ACTIVE_LOW (1 << 18)
+#define LCD_PCLK_EDGE_RISE (0 << 19)
+#define LCD_PCLK_EDGE_FALL (1 << 19)
+#define LCD_ALTERNATE_MAPPING (1 << 20)
/*
* This structure describes the machine which we are running on.
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-clock.h b/arch/arm/mach-s3c2410/include/mach/regs-clock.h
index d583688458a4..b3f90aa78076 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-clock.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-clock.h
@@ -11,7 +11,7 @@
*/
#ifndef __ASM_ARM_REGS_CLOCK
-#define __ASM_ARM_REGS_CLOCK "$Id: clock.h,v 1.4 2003/04/30 14:50:51 ben Exp $"
+#define __ASM_ARM_REGS_CLOCK
#define S3C2410_CLKREG(x) ((x) + S3C24XX_VA_CLKPWR)
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
index 30bec027f5fa..528080ceac44 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
@@ -12,7 +12,7 @@
#ifndef __ASM_ARCH_REGS_GPIO_H
-#define __ASM_ARCH_REGS_GPIO_H "$Id: gpio.h,v 1.5 2003/05/19 12:51:08 ben Exp $"
+#define __ASM_ARCH_REGS_GPIO_H
#define S3C2410_GPIONO(bank,offset) ((bank) + (offset))
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-irq.h b/arch/arm/mach-s3c2410/include/mach/regs-irq.h
index b057c06d167a..de86ee8812bd 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-irq.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-irq.h
@@ -10,7 +10,7 @@
#ifndef ___ASM_ARCH_REGS_IRQ_H
-#define ___ASM_ARCH_REGS_IRQ_H "$Id: irq.h,v 1.3 2003/03/25 21:29:06 ben Exp $"
+#define ___ASM_ARCH_REGS_IRQ_H
/* interrupt controller */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-lcd.h b/arch/arm/mach-s3c2410/include/mach/regs-lcd.h
index 893b8742f954..ee8f040aff5f 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-lcd.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-lcd.h
@@ -10,7 +10,7 @@
#ifndef ___ASM_ARCH_REGS_LCD_H
-#define ___ASM_ARCH_REGS_LCD_H "$Id: lcd.h,v 1.3 2003/06/26 13:25:06 ben Exp $"
+#define ___ASM_ARCH_REGS_LCD_H
#define S3C2410_LCDREG(x) (x)
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-mem.h b/arch/arm/mach-s3c2410/include/mach/regs-mem.h
index f9926abd5cde..57759804e2fa 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-mem.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-mem.h
@@ -11,7 +11,7 @@
*/
#ifndef __ASM_ARM_MEMREGS_H
-#define __ASM_ARM_MEMREGS_H "$Id: regs.h,v 1.8 2003/05/01 15:55:41 ben Exp $"
+#define __ASM_ARM_MEMREGS_H
#ifndef S3C2410_MEMREG
#define S3C2410_MEMREG(x) (S3C24XX_VA_MEMCTRL + (x))
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index fb1e78e28e50..24c6334fac89 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -562,7 +562,7 @@ static struct platform_device *bast_devices[] __initdata = {
&bast_sio,
};
-static struct clk *bast_clocks[] = {
+static struct clk *bast_clocks[] __initdata = {
&s3c24xx_dclk0,
&s3c24xx_dclk1,
&s3c24xx_clkout0,
diff --git a/arch/arm/mach-s3c2410/mach-smdk2410.c b/arch/arm/mach-s3c2410/mach-smdk2410.c
index c9040080727e..b88939d72282 100644
--- a/arch/arm/mach-s3c2410/mach-smdk2410.c
+++ b/arch/arm/mach-s3c2410/mach-smdk2410.c
@@ -5,7 +5,6 @@
* Copyright (C) 2004 by FS Forth-Systeme GmbH
* All rights reserved.
*
- * $Id: mach-smdk2410.c,v 1.1 2004/05/11 14:15:38 mpietrek Exp $
* @Author: Jonas Dietsche
*
* This program is free software; you can redistribute it and/or
diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c
index 12cbca68f57d..fbc0213d5485 100644
--- a/arch/arm/mach-s3c2410/mach-vr1000.c
+++ b/arch/arm/mach-s3c2410/mach-vr1000.c
@@ -344,7 +344,7 @@ static struct platform_device *vr1000_devices[] __initdata = {
&vr1000_led3,
};
-static struct clk *vr1000_clocks[] = {
+static struct clk *vr1000_clocks[] __initdata = {
&s3c24xx_dclk0,
&s3c24xx_dclk1,
&s3c24xx_clkout0,
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 30f613a79bfe..4c061d29463c 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -26,9 +26,6 @@
#include <linux/spi/spi.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-s3c2440/mach-anubis.c b/arch/arm/mach-s3c2440/mach-anubis.c
index 265c77dec9d7..441f4bc09472 100644
--- a/arch/arm/mach-s3c2440/mach-anubis.c
+++ b/arch/arm/mach-s3c2440/mach-anubis.c
@@ -414,7 +414,7 @@ static struct platform_device *anubis_devices[] __initdata = {
&anubis_device_sm501,
};
-static struct clk *anubis_clocks[] = {
+static struct clk *anubis_clocks[] __initdata = {
&s3c24xx_dclk0,
&s3c24xx_dclk1,
&s3c24xx_clkout0,
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c
index d2ee0cd148c6..8b83f93b6102 100644
--- a/arch/arm/mach-s3c2440/mach-osiris.c
+++ b/arch/arm/mach-s3c2440/mach-osiris.c
@@ -341,7 +341,7 @@ static struct platform_device *osiris_devices[] __initdata = {
&osiris_pcmcia,
};
-static struct clk *osiris_clocks[] = {
+static struct clk *osiris_clocks[] __initdata = {
&s3c24xx_dclk0,
&s3c24xx_dclk1,
&s3c24xx_clkout0,
diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c
index 39d38c801736..029dbfbbafcf 100644
--- a/arch/arm/mach-sa1100/cpu-sa1110.c
+++ b/arch/arm/mach-sa1100/cpu-sa1110.c
@@ -3,8 +3,6 @@
*
* Copyright (C) 2001 Russell King
*
- * $Id: cpu-sa1110.c,v 1.9 2002/07/06 16:53:18 rmk Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/arch/arm/mach-sa1100/include/mach/mtd-xip.h b/arch/arm/mach-sa1100/include/mach/mtd-xip.h
index 80cfdac2b944..eaa09e86ad16 100644
--- a/arch/arm/mach-sa1100/include/mach/mtd-xip.h
+++ b/arch/arm/mach-sa1100/include/mach/mtd-xip.h
@@ -10,8 +10,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * $Id: xip.h,v 1.2 2004/12/01 15:49:10 nico Exp $
*/
#ifndef __ARCH_SA1100_MTD_XIP_H__
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 20eec4ba173f..7b5a25d81576 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <asm/cacheflush.h>
-#include <asm/plat-orion/cache-feroceon-l2.h>
+#include <plat/cache-feroceon-l2.h>
/*
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2d6d682c206a..25d9a11eb617 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -568,6 +568,55 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
create_mapping(io_desc + i);
}
+static int __init check_membank_valid(struct membank *mb)
+{
+ /*
+ * Check whether this memory region has non-zero size.
+ */
+ if (mb->size == 0)
+ return 0;
+
+ /*
+ * Check whether this memory region would entirely overlap
+ * the vmalloc area.
+ */
+ if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
+ printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+ "(vmalloc region overlap).\n",
+ mb->start, mb->start + mb->size - 1);
+ return 0;
+ }
+
+ /*
+ * Check whether this memory region would partially overlap
+ * the vmalloc area.
+ */
+ if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
+ phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
+ unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
+
+ printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
+ "to -%.8lx (vmalloc region overlap).\n",
+ mb->start, mb->start + mb->size - 1,
+ mb->start + newsize - 1);
+ mb->size = newsize;
+ }
+
+ return 1;
+}
+
+static void __init sanity_check_meminfo(struct meminfo *mi)
+{
+ int i;
+ int j;
+
+ for (i = 0, j = 0; i < mi->nr_banks; i++) {
+ if (check_membank_valid(&mi->bank[i]))
+ mi->bank[j++] = mi->bank[i];
+ }
+ mi->nr_banks = j;
+}
+
static inline void prepare_page_table(struct meminfo *mi)
{
unsigned long addr;
@@ -753,6 +802,7 @@ void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
void *zero_page;
build_mem_type_table();
+ sanity_check_meminfo(mi);
prepare_page_table(mi);
bootmem_init(mi);
devicemaps_init(mdesc);
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1a3d63df8e90..551244d5ca19 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -15,6 +15,7 @@
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
+#include "proc-macros.S"
/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE 16
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 82d579ac9b98..6168c6160dee 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -17,6 +17,7 @@
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
+#include "proc-macros.S"
/*
* ARM946E-S is synthesizable to have 0KB to 1MB sized D-Cache,
diff --git a/arch/arm/plat-omap/include/mach/memory.h b/arch/arm/plat-omap/include/mach/memory.h
index 037486c5f4a4..a325caf80d04 100644
--- a/arch/arm/plat-omap/include/mach/memory.h
+++ b/arch/arm/plat-omap/include/mach/memory.h
@@ -76,13 +76,14 @@
(dma_addr_t)virt_to_lbus(page_address(page)) : \
(dma_addr_t)__virt_to_bus(page_address(page));})
-#define __arch_dma_to_virt(dev, addr) ({is_lbus_device(dev) ? \
- lbus_to_virt(addr) : \
- __bus_to_virt(addr);})
-
-#define __arch_virt_to_dma(dev, addr) ({is_lbus_device(dev) ? \
- virt_to_lbus(addr) : \
- __virt_to_bus(addr);})
+#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
+ lbus_to_virt(addr) : \
+ __bus_to_virt(addr)); })
+
+#define __arch_virt_to_dma(dev, addr) ({ unsigned long __addr = (unsigned long)(addr); \
+ (dma_addr_t) (is_lbus_device(dev) ? \
+ virt_to_lbus(__addr) : \
+ __virt_to_bus(__addr)); })
#endif /* CONFIG_ARCH_OMAP15XX */
diff --git a/include/asm-arm/plat-orion/cache-feroceon-l2.h b/arch/arm/plat-orion/include/plat/cache-feroceon-l2.h
index ba4e016d3ec0..06f982d55697 100644
--- a/include/asm-arm/plat-orion/cache-feroceon-l2.h
+++ b/arch/arm/plat-orion/include/plat/cache-feroceon-l2.h
@@ -1,5 +1,5 @@
/*
- * include/asm-arm/plat-orion/cache-feroceon-l2.h
+ * arch/arm/plat-orion/include/plat/cache-feroceon-l2.h
*
* Copyright (C) 2008 Marvell Semiconductor
*
diff --git a/include/asm-arm/plat-orion/ehci-orion.h b/arch/arm/plat-orion/include/plat/ehci-orion.h
index 785705651e24..64343051095a 100644
--- a/include/asm-arm/plat-orion/ehci-orion.h
+++ b/arch/arm/plat-orion/include/plat/ehci-orion.h
@@ -1,13 +1,13 @@
/*
- * include/asm-arm/plat-orion/ehci-orion.h
+ * arch/arm/plat-orion/include/plat/ehci-orion.h
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#ifndef __ASM_PLAT_ORION_EHCI_ORION_H
-#define __ASM_PLAT_ORION_EHCI_ORION_H
+#ifndef __PLAT_EHCI_ORION_H
+#define __PLAT_EHCI_ORION_H
#include <linux/mbus.h>
diff --git a/include/asm-arm/plat-orion/irq.h b/arch/arm/plat-orion/include/plat/irq.h
index 94aeed919d5b..f05eeab94968 100644
--- a/include/asm-arm/plat-orion/irq.h
+++ b/arch/arm/plat-orion/include/plat/irq.h
@@ -1,5 +1,5 @@
/*
- * include/asm-arm/plat-orion/irq.h
+ * arch/arm/plat-orion/include/plat/irq.h
*
* Marvell Orion SoC IRQ handling.
*
@@ -8,8 +8,8 @@
* warranty of any kind, whether express or implied.
*/
-#ifndef __ASM_PLAT_ORION_IRQ_H
-#define __ASM_PLAT_ORION_IRQ_H
+#ifndef __PLAT_IRQ_H
+#define __PLAT_IRQ_H
void orion_irq_init(unsigned int irq_start, void __iomem *maskaddr);
diff --git a/include/asm-arm/plat-orion/mv_xor.h b/arch/arm/plat-orion/include/plat/mv_xor.h
index c349e8ff5cc0..bd5f3bdb4ae3 100644
--- a/include/asm-arm/plat-orion/mv_xor.h
+++ b/arch/arm/plat-orion/include/plat/mv_xor.h
@@ -1,9 +1,11 @@
/*
+ * arch/arm/plat-orion/include/plat/mv_xor.h
+ *
* Marvell XOR platform device data definition file.
*/
-#ifndef __ASM_PLAT_ORION_MV_XOR_H
-#define __ASM_PLAT_ORION_MV_XOR_H
+#ifndef __PLAT_MV_XOR_H
+#define __PLAT_MV_XOR_H
#include <linux/dmaengine.h>
#include <linux/mbus.h>
diff --git a/include/asm-arm/plat-orion/orion_nand.h b/arch/arm/plat-orion/include/plat/orion_nand.h
index ad4ce94c1998..d6a4cfa37785 100644
--- a/include/asm-arm/plat-orion/orion_nand.h
+++ b/arch/arm/plat-orion/include/plat/orion_nand.h
@@ -1,13 +1,13 @@
/*
- * include/asm-arm/plat-orion/orion_nand.h
+ * arch/arm/plat-orion/include/plat/orion_nand.h
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#ifndef __ASM_PLAT_ORION_ORION_NAND_H
-#define __ASM_PLAT_ORION_ORION_NAND_H
+#ifndef __PLAT_ORION_NAND_H
+#define __PLAT_ORION_NAND_H
/*
* Device bus NAND private data
diff --git a/include/asm-arm/plat-orion/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index e61b7bd97af5..3ebfef72b4e7 100644
--- a/include/asm-arm/plat-orion/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -1,5 +1,5 @@
/*
- * include/asm-arm/plat-orion/pcie.h
+ * arch/arm/plat-orion/include/plat/pcie.h
*
* Marvell Orion SoC PCIe handling.
*
@@ -8,8 +8,8 @@
* warranty of any kind, whether express or implied.
*/
-#ifndef __ASM_PLAT_ORION_PCIE_H
-#define __ASM_PLAT_ORION_PCIE_H
+#ifndef __PLAT_PCIE_H
+#define __PLAT_PCIE_H
u32 orion_pcie_dev_id(void __iomem *base);
u32 orion_pcie_rev(void __iomem *base);
diff --git a/include/asm-arm/plat-orion/time.h b/arch/arm/plat-orion/include/plat/time.h
index 0e85cc8f44d9..c06ca35f3613 100644
--- a/include/asm-arm/plat-orion/time.h
+++ b/arch/arm/plat-orion/include/plat/time.h
@@ -1,5 +1,5 @@
/*
- * include/asm-arm/plat-orion/time.h
+ * arch/arm/plat-orion/include/plat/time.h
*
* Marvell Orion SoC time handling.
*
@@ -8,8 +8,8 @@
* warranty of any kind, whether express or implied.
*/
-#ifndef __ASM_PLAT_ORION_TIME_H
-#define __ASM_PLAT_ORION_TIME_H
+#ifndef __PLAT_TIME_H
+#define __PLAT_TIME_H
void orion_time_init(unsigned int irq, unsigned int tclk);
diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c
index fe66a1835169..3f9d34fc738c 100644
--- a/arch/arm/plat-orion/irq.c
+++ b/arch/arm/plat-orion/irq.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <asm/plat-orion/irq.h>
+#include <plat/irq.h>
static void orion_irq_mask(u32 irq)
{
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index ca32c60e14d7..883902fead89 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -12,7 +12,7 @@
#include <linux/pci.h>
#include <linux/mbus.h>
#include <asm/mach/pci.h>
-#include <asm/plat-orion/pcie.h>
+#include <plat/pcie.h>
/*
* PCIe unit register offsets.
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 6d60f0476bb8..89ce60eabd5b 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -169,9 +169,7 @@ static struct map_desc s3c_iodesc[] __initdata = {
IODESC_ENT(UART)
};
-
-static struct cpu_table *
-s3c_lookup_cpu(unsigned long idcode)
+static struct cpu_table * __init s3c_lookup_cpu(unsigned long idcode)
{
struct cpu_table *tab;
int count;
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 8b8f564c3aa2..56281c030a7b 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Sun Jul 13 12:04:05 2008
+# Last update: Wed Aug 13 21:56:02 2008
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -843,7 +843,7 @@ borzoi MACH_BORZOI BORZOI 831
gecko MACH_GECKO GECKO 832
ds101 MACH_DS101 DS101 833
omap_palmtt2 MACH_OMAP_PALMTT2 OMAP_PALMTT2 834
-xscale_palmld MACH_XSCALE_PALMLD XSCALE_PALMLD 835
+palmld MACH_PALMLD PALMLD 835
cc9c MACH_CC9C CC9C 836
sbc1670 MACH_SBC1670 SBC1670 837
ixdp28x5 MACH_IXDP28X5 IXDP28X5 838
@@ -852,7 +852,7 @@ ml696k MACH_ML696K ML696K 840
arcom_zeus MACH_ARCOM_ZEUS ARCOM_ZEUS 841
osiris MACH_OSIRIS OSIRIS 842
maestro MACH_MAESTRO MAESTRO 843
-tunge2 MACH_TUNGE2 TUNGE2 844
+palmte2 MACH_PALMTE2 PALMTE2 844
ixbbm MACH_IXBBM IXBBM 845
mx27ads MACH_MX27ADS MX27ADS 846
ax8004 MACH_AX8004 AX8004 847
@@ -916,7 +916,7 @@ nxdb500 MACH_NXDB500 NXDB500 905
apf9328 MACH_APF9328 APF9328 906
omap_wipoq MACH_OMAP_WIPOQ OMAP_WIPOQ 907
omap_twip MACH_OMAP_TWIP OMAP_TWIP 908
-palmtreo650 MACH_PALMTREO650 PALMTREO650 909
+palmt650 MACH_PALMT650 PALMT650 909
acumen MACH_ACUMEN ACUMEN 910
xp100 MACH_XP100 XP100 911
fs2410 MACH_FS2410 FS2410 912
@@ -1096,7 +1096,7 @@ atc6 MACH_ATC6 ATC6 1086
multmdw MACH_MULTMDW MULTMDW 1087
mba2440 MACH_MBA2440 MBA2440 1088
ecsd MACH_ECSD ECSD 1089
-zire31 MACH_ZIRE31 ZIRE31 1090
+palmz31 MACH_PALMZ31 PALMZ31 1090
fsg MACH_FSG FSG 1091
razor101 MACH_RAZOR101 RAZOR101 1092
opera_tdm MACH_OPERA_TDM OPERA_TDM 1093
@@ -1810,7 +1810,7 @@ kriss_sensor MACH_KRISS_SENSOR KRISS_SENSOR 1819
pilz_pmi5 MACH_PILZ_PMI5 PILZ_PMI5 1820
jade MACH_JADE JADE 1821
ks8695_softplc MACH_KS8695_SOFTPLC KS8695_SOFTPLC 1822
-gprisc4 MACH_GPRISC4 GPRISC4 1823
+gprisc3 MACH_GPRISC4 GPRISC4 1823
stamp9260 MACH_STAMP9260 STAMP9260 1824
smdk6430 MACH_SMDK6430 SMDK6430 1825
smdkc100 MACH_SMDKC100 SMDKC100 1826
@@ -1820,3 +1820,44 @@ deister_eyecam MACH_DEISTER_EYECAM DEISTER_EYECAM 1829
at91sam9m10ek MACH_AT91SAM9M10EK AT91SAM9M10EK 1830
linkstation_produo MACH_LINKSTATION_PRODUO LINKSTATION_PRODUO 1831
hit_b0 MACH_HIT_B0 HIT_B0 1832
+adx_rmu MACH_ADX_RMU ADX_RMU 1833
+xg_cpe_main MACH_XG_CPE_MAIN XG_CPE_MAIN 1834
+edb9407a MACH_EDB9407A EDB9407A 1835
+dtb9608 MACH_DTB9608 DTB9608 1836
+em104v1 MACH_EM104V1 EM104V1 1837
+demo MACH_DEMO DEMO 1838
+logi9260 MACH_LOGI9260 LOGI9260 1839
+mx31_exm32 MACH_MX31_EXM32 MX31_EXM32 1840
+usb_a9g20 MACH_USB_A9G20 USB_A9G20 1841
+picproje2008 MACH_PICPROJE2008 PICPROJE2008 1842
+cs_e9315 MACH_CS_E9315 CS_E9315 1843
+qil_a9g20 MACH_QIL_A9G20 QIL_A9G20 1844
+sha_pon020 MACH_SHA_PON020 SHA_PON020 1845
+nad MACH_NAD NAD 1846
+sbc35_a9260 MACH_SBC35_A9260 SBC35_A9260 1847
+sbc35_a9g20 MACH_SBC35_A9G20 SBC35_A9G20 1848
+davinci_beginning MACH_DAVINCI_BEGINNING DAVINCI_BEGINNING 1849
+uwc MACH_UWC UWC 1850
+mxlads MACH_MXLADS MXLADS 1851
+htcnike MACH_HTCNIKE HTCNIKE 1852
+deister_pxa270 MACH_DEISTER_PXA270 DEISTER_PXA270 1853
+cme9210js MACH_CME9210JS CME9210JS 1854
+cc9p9360 MACH_CC9P9360 CC9P9360 1855
+mocha MACH_MOCHA MOCHA 1856
+wapd170ag MACH_WAPD170AG WAPD170AG 1857
+linkstation_mini MACH_LINKSTATION_MINI LINKSTATION_MINI 1858
+afeb9260 MACH_AFEB9260 AFEB9260 1859
+w90x900 MACH_W90X900 W90X900 1860
+w90x700 MACH_W90X700 W90X700 1861
+kt300ip MACH_KT300IP KT300IP 1862
+kt300ip_g20 MACH_KT300IP_G20 KT300IP_G20 1863
+srcm MACH_SRCM SRCM 1864
+wlnx_9260 MACH_WLNX_9260 WLNX_9260 1865
+openmoko_gta03 MACH_OPENMOKO_GTA03 OPENMOKO_GTA03 1866
+osprey2 MACH_OSPREY2 OSPREY2 1867
+kbio9260 MACH_KBIO9260 KBIO9260 1868
+ginza MACH_GINZA GINZA 1869
+a636n MACH_A636N A636N 1870
+imx27ipcam MACH_IMX27IPCAM IMX27IPCAM 1871
+nenoc MACH_NEMOC NEMOC 1872
+geneva MACH_GENEVA GENEVA 1873
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 5a097c46bc46..f64d25973a37 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -249,7 +249,7 @@ config MEM_MT48LC8M32B2B5_7
config MEM_MT48LC32M16A2TG_75
bool
- depends on (BFIN527_EZKIT || BFIN532_IP0X)
+ depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP)
default y
source "arch/blackfin/mach-bf527/Kconfig"
@@ -292,7 +292,7 @@ config CLKIN_HZ
int "Frequency of the crystal on the board in Hz"
default "11059200" if BFIN533_STAMP
default "27000000" if BFIN533_EZKIT
- default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS)
+ default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP)
default "30000000" if BFIN561_EZKIT
default "24576000" if PNAV10
default "10000000" if BFIN532_IP0X
@@ -332,7 +332,7 @@ config VCO_MULT
default "22" if BFIN533_BLUETECHNIX_CM
default "20" if (BFIN537_BLUETECHNIX_CM || BFIN527_BLUETECHNIX_CM || BFIN561_BLUETECHNIX_CM)
default "20" if BFIN561_EZKIT
- default "16" if H8606_HVSISTEMAS
+ default "16" if (H8606_HVSISTEMAS || BLACKSTAMP)
help
This controls the frequency of the on-chip PLL. This can be between 1 and 64.
PLL Frequency = (Crystal Frequency) * (this setting)
@@ -622,6 +622,33 @@ config CPLB_SWITCH_TAB_L1
If enabled, the CPLB Switch Tables are linked
into L1 data memory. (less latency)
+comment "Speed Optimizations"
+config BFIN_INS_LOWOVERHEAD
+ bool "ins[bwl] low overhead, higher interrupt latency"
+ default y
+ help
+ Reads on the Blackfin are speculative. In Blackfin terms, this means
+ they can be interrupted at any time (even after they have been issued
+ on to the external bus) and re-issued after the interrupt occurs.
+ For memory this is not a big deal, since memory does not change when
+ it sees a read.
+
+ If a FIFO is sitting on the end of the read, however, it will see two
+ reads while the core only sees one: the FIFO receives both the read
+ that is cancelled (and not delivered to the core) and the one that is
+ re-issued (which is delivered to the core).
+
+ To solve this, interrupts are turned off before reads to I/O space
+ occur. This option controls the overhead/latency trade-off of masking
+ interrupts during this time:
+ "n" turns interrupts off around every read
+ (higher overhead, but lower interrupt latency)
+ "y" turns interrupts off around every loop
+ (lower overhead, but longer interrupt latency)
+
+ The default is to leave this enabled ("y"). If you are experiencing
+ interrupt latency issues, it is safe to turn it off.
+
endmenu
@@ -933,13 +960,6 @@ endchoice
comment "Possible Suspend Mem / Hibernate Wake-Up Sources"
depends on PM
-config PM_BFIN_WAKE_RTC
- bool "Allow Wake-Up from RESET and on-chip RTC"
- depends on PM
- default n
- help
- Enable RTC Wake-Up (Voltage Regulator Power-Up)
-
config PM_BFIN_WAKE_PH6
bool "Allow Wake-Up from on-chip PHY or PH6 GP"
depends on PM && (BF52x || BF534 || BF536 || BF537)
@@ -947,41 +967,12 @@ config PM_BFIN_WAKE_PH6
help
Enable PHY and PH6 GP Wake-Up (Voltage Regulator Power-Up)
-config PM_BFIN_WAKE_CAN
- bool "Allow Wake-Up from on-chip CAN0/1"
- depends on PM && (BF54x || BF534 || BF536 || BF537)
- default n
- help
- Enable CAN0/1 Wake-Up (Voltage Regulator Power-Up)
-
config PM_BFIN_WAKE_GP
bool "Allow Wake-Up from GPIOs"
depends on PM && BF54x
default n
help
Enable General-Purpose Wake-Up (Voltage Regulator Power-Up)
-
-config PM_BFIN_WAKE_USB
- bool "Allow Wake-Up from on-chip USB"
- depends on PM && (BF54x || BF52x)
- default n
- help
- Enable USB Wake-Up (Voltage Regulator Power-Up)
-
-config PM_BFIN_WAKE_KEYPAD
- bool "Allow Wake-Up from on-chip Keypad"
- depends on PM && BF54x
- default n
- help
- Enable Keypad Wake-Up (Voltage Regulator Power-Up)
-
-config PM_BFIN_WAKE_ROTARY
- bool "Allow Wake-Up from on-chip Rotary"
- depends on PM && BF54x
- default n
- help
- Enable Rotary Wake-Up (Voltage Regulator Power-Up)
-
endmenu
menu "CPU Frequency scaling"
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
new file mode 100644
index 000000000000..2921f9952d5f
--- /dev/null
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -0,0 +1,1195 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.26.2
+#
+# CONFIG_MMU is not set
+# CONFIG_FPU is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+CONFIG_BLACKFIN=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_FORCE_MAX_ZONEORDER=14
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_GROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_KPROBES is not set
+# CONFIG_HAVE_KRETPROBES is not set
+# CONFIG_HAVE_DMA_ATTRS is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+
+#
+# Blackfin Processor Options
+#
+
+#
+# Processor and Board Settings
+#
+# CONFIG_BF522 is not set
+# CONFIG_BF523 is not set
+# CONFIG_BF524 is not set
+# CONFIG_BF525 is not set
+# CONFIG_BF526 is not set
+# CONFIG_BF527 is not set
+# CONFIG_BF531 is not set
+CONFIG_BF532=y
+# CONFIG_BF533 is not set
+# CONFIG_BF534 is not set
+# CONFIG_BF536 is not set
+# CONFIG_BF537 is not set
+# CONFIG_BF542 is not set
+# CONFIG_BF544 is not set
+# CONFIG_BF547 is not set
+# CONFIG_BF548 is not set
+# CONFIG_BF549 is not set
+# CONFIG_BF561 is not set
+# CONFIG_BF_REV_0_0 is not set
+# CONFIG_BF_REV_0_1 is not set
+# CONFIG_BF_REV_0_2 is not set
+# CONFIG_BF_REV_0_3 is not set
+# CONFIG_BF_REV_0_4 is not set
+CONFIG_BF_REV_0_5=y
+# CONFIG_BF_REV_ANY is not set
+# CONFIG_BF_REV_NONE is not set
+CONFIG_BF53x=y
+CONFIG_MEM_MT48LC32M16A2TG_75=y
+# CONFIG_BFIN533_EZKIT is not set
+# CONFIG_BFIN533_STAMP is not set
+# CONFIG_BFIN533_BLUETECHNIX_CM is not set
+# CONFIG_H8606_HVSISTEMAS is not set
+# CONFIG_BFIN532_IP0X is not set
+CONFIG_BLACKSTAMP=y
+# CONFIG_GENERIC_BF533_BOARD is not set
+
+#
+# BF533/2/1 Specific Configuration
+#
+
+#
+# Interrupt Priority Assignment
+#
+
+#
+# Priority
+#
+CONFIG_UART_ERROR=7
+CONFIG_SPORT0_ERROR=7
+CONFIG_SPI_ERROR=7
+CONFIG_SPORT1_ERROR=7
+CONFIG_PPI_ERROR=7
+CONFIG_DMA_ERROR=7
+CONFIG_PLLWAKE_ERROR=7
+CONFIG_RTC_ERROR=8
+CONFIG_DMA0_PPI=8
+CONFIG_DMA1_SPORT0RX=9
+CONFIG_DMA2_SPORT0TX=9
+CONFIG_DMA3_SPORT1RX=9
+CONFIG_DMA4_SPORT1TX=9
+CONFIG_DMA5_SPI=10
+CONFIG_DMA6_UARTRX=10
+CONFIG_DMA7_UARTTX=10
+CONFIG_TIMER0=11
+CONFIG_TIMER1=11
+CONFIG_TIMER2=11
+CONFIG_PFA=12
+CONFIG_PFB=12
+CONFIG_MEMDMA0=13
+CONFIG_MEMDMA1=13
+CONFIG_WDTIMER=13
+
+#
+# Board customizations
+#
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
+
+#
+# Clock/PLL Setup
+#
+CONFIG_CLKIN_HZ=25000000
+# CONFIG_BFIN_KERNEL_CLOCK is not set
+# CONFIG_PLL_BYPASS is not set
+# CONFIG_CLKIN_HALF is not set
+CONFIG_VCO_MULT=16
+CONFIG_CCLK_DIV_1=y
+# CONFIG_CCLK_DIV_2 is not set
+# CONFIG_CCLK_DIV_4 is not set
+# CONFIG_CCLK_DIV_8 is not set
+CONFIG_SCLK_DIV=3
+CONFIG_MAX_MEM_SIZE=64
+CONFIG_MAX_VCO_HZ=400000000
+CONFIG_MIN_VCO_HZ=50000000
+CONFIG_MAX_SCLK_HZ=133333333
+CONFIG_MIN_SCLK_HZ=27000000
+
+#
+# Kernel Timer/Scheduler
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# Memory Setup
+#
+
+#
+# Misc
+#
+CONFIG_BFIN_SCRATCH_REG_RETN=y
+# CONFIG_BFIN_SCRATCH_REG_RETE is not set
+# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
+
+#
+# Blackfin Kernel Optimizations
+#
+
+#
+# Memory Optimizations
+#
+CONFIG_I_ENTRY_L1=y
+CONFIG_EXCPT_IRQ_SYSC_L1=y
+CONFIG_DO_IRQ_L1=y
+CONFIG_CORE_TIMER_IRQ_L1=y
+CONFIG_IDLE_L1=y
+CONFIG_SCHEDULE_L1=y
+CONFIG_ARITHMETIC_OPS_L1=y
+CONFIG_ACCESS_OK_L1=y
+CONFIG_MEMSET_L1=y
+CONFIG_MEMCPY_L1=y
+CONFIG_SYS_BFIN_SPINLOCK_L1=y
+# CONFIG_IP_CHECKSUM_L1 is not set
+CONFIG_CACHELINE_ALIGNED_L1=y
+# CONFIG_SYSCALL_TAB_L1 is not set
+# CONFIG_CPLB_SWITCH_TAB_L1 is not set
+# CONFIG_RAMKERNEL is not set
+CONFIG_ROMKERNEL=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_VIRT_TO_BUS=y
+CONFIG_BFIN_GPTIMERS=y
+CONFIG_BFIN_DMA_5XX=y
+# CONFIG_DMA_UNCACHED_4M is not set
+# CONFIG_DMA_UNCACHED_2M is not set
+CONFIG_DMA_UNCACHED_1M=y
+# CONFIG_DMA_UNCACHED_NONE is not set
+
+#
+# Cache Support
+#
+CONFIG_BFIN_ICACHE=y
+CONFIG_BFIN_DCACHE=y
+# CONFIG_BFIN_DCACHE_BANKA is not set
+# CONFIG_BFIN_ICACHE_LOCK is not set
+# CONFIG_BFIN_WB is not set
+CONFIG_BFIN_WT=y
+# CONFIG_MPU is not set
+
+#
+# Asynchonous Memory Configuration
+#
+
+#
+# EBIU_AMGCTL Global Control
+#
+CONFIG_C_AMCKEN=y
+CONFIG_C_CDPRIO=y
+# CONFIG_C_AMBEN is not set
+# CONFIG_C_AMBEN_B0 is not set
+# CONFIG_C_AMBEN_B0_B1 is not set
+# CONFIG_C_AMBEN_B0_B1_B2 is not set
+CONFIG_C_AMBEN_ALL=y
+
+#
+# EBIU_AMBCTL Control
+#
+CONFIG_BANK_0=0x7BB0
+CONFIG_BANK_1=0x7BB0
+CONFIG_BANK_2=0x7BB0
+CONFIG_BANK_3=0xAAC2
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+# CONFIG_PCI is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF_FDPIC=y
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+CONFIG_BINFMT_SHARED_FLAT=y
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_PM_BFIN_SLEEP_DEEPER=y
+# CONFIG_PM_BFIN_SLEEP is not set
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
+
+#
+# Possible Suspend Mem / Hibernate Wake-Up Sources
+#
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=m
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=m
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=m
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_UCLINUX is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+CONFIG_MTD_M25P80=y
+# CONFIG_M25PXX_USE_FAST_READ is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_SMC91X=y
+# CONFIG_SMSC911X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_AD9960 is not set
+# CONFIG_SPI_ADC_BF533 is not set
+# CONFIG_BF5xx_PPIFCD is not set
+# CONFIG_BFIN_SIMPLE_TIMER is not set
+CONFIG_BF5xx_PPI=y
+CONFIG_BFIN_SPORT=y
+# CONFIG_BFIN_TIMER_LATENCY is not set
+# CONFIG_TWI_LCD is not set
+CONFIG_SIMPLE_GPIO=m
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_DMA=y
+# CONFIG_SERIAL_BFIN_PIO is not set
+CONFIG_SERIAL_BFIN_UART0=y
+# CONFIG_BFIN_UART0_CTSRTS is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_BFIN_SPORT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# CAN, the car bus and industrial fieldbus
+#
+# CONFIG_CAN4LINUX is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_AD5252 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BITBANG is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_AT25=y
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_BFIN_WDT=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD Host Controller Drivers
+#
+CONFIG_MMC_SPI=y
+# CONFIG_SPI_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_BFIN=y
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_YAFFS_FS=m
+CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
+# CONFIG_YAFFS_DOES_ECC is not set
+CONFIG_YAFFS_YAFFS2=y
+CONFIG_YAFFS_AUTO_YAFFS2=y
+# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
+# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
+# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFSD is not set
+# CONFIG_ROOT_NFS is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_BIND34 is not set
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=y
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_DEBUG_MMRS=y
+CONFIG_DEBUG_HUNT_FOR_ZERO=y
+CONFIG_DEBUG_BFIN_HWTRACE_ON=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
+# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
+# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
+CONFIG_EARLY_PRINTK=y
+CONFIG_CPLB_INFO=y
+CONFIG_ACCESS_CHECK=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
new file mode 100644
index 000000000000..c482ee171f9e
--- /dev/null
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -0,0 +1,693 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.24.7
+# Thu Jul 31 00:53:15 2008
+#
+# CONFIG_MMU is not set
+# CONFIG_FPU is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+CONFIG_BLACKFIN=y
+CONFIG_ZONE_DMA=y
+CONFIG_SEMAPHORE_SLEEPERS=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_FORCE_MAX_ZONEORDER=14
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FAIR_USER_SCHED=y
+# CONFIG_FAIR_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_UID16 is not set
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_EVENTFD=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+
+#
+# Blackfin Processor Options
+#
+
+#
+# Processor and Board Settings
+#
+# CONFIG_BF522 is not set
+# CONFIG_BF523 is not set
+# CONFIG_BF524 is not set
+# CONFIG_BF525 is not set
+# CONFIG_BF526 is not set
+# CONFIG_BF527 is not set
+# CONFIG_BF531 is not set
+# CONFIG_BF532 is not set
+# CONFIG_BF533 is not set
+# CONFIG_BF534 is not set
+# CONFIG_BF536 is not set
+CONFIG_BF537=y
+# CONFIG_BF542 is not set
+# CONFIG_BF544 is not set
+# CONFIG_BF547 is not set
+# CONFIG_BF548 is not set
+# CONFIG_BF549 is not set
+# CONFIG_BF561 is not set
+# CONFIG_BF_REV_0_0 is not set
+# CONFIG_BF_REV_0_1 is not set
+CONFIG_BF_REV_0_2=y
+# CONFIG_BF_REV_0_3 is not set
+# CONFIG_BF_REV_0_4 is not set
+# CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_ANY is not set
+# CONFIG_BF_REV_NONE is not set
+CONFIG_BF53x=y
+CONFIG_IRQ_PLL_WAKEUP=7
+CONFIG_IRQ_RTC=8
+CONFIG_IRQ_PPI=8
+CONFIG_IRQ_SPORT0_RX=9
+CONFIG_IRQ_SPORT0_TX=9
+CONFIG_IRQ_SPORT1_RX=9
+CONFIG_IRQ_SPORT1_TX=9
+CONFIG_IRQ_TWI=10
+CONFIG_IRQ_SPI=10
+CONFIG_IRQ_UART0_RX=10
+CONFIG_IRQ_UART0_TX=10
+CONFIG_IRQ_UART1_RX=10
+CONFIG_IRQ_UART1_TX=10
+CONFIG_IRQ_MAC_RX=11
+CONFIG_IRQ_MAC_TX=11
+CONFIG_IRQ_TMR0=12
+CONFIG_IRQ_TMR1=12
+CONFIG_IRQ_TMR2=12
+CONFIG_IRQ_TMR3=12
+CONFIG_IRQ_TMR4=12
+CONFIG_IRQ_TMR5=12
+CONFIG_IRQ_TMR6=12
+CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_PORTG_INTB=12
+CONFIG_IRQ_MEM_DMA0=13
+CONFIG_IRQ_MEM_DMA1=13
+CONFIG_IRQ_WATCH=13
+# CONFIG_BFIN537_STAMP is not set
+# CONFIG_BFIN537_BLUETECHNIX_CM is not set
+CONFIG_BFIN537_BLUETECHNIX_TCM=y
+# CONFIG_PNAV10 is not set
+# CONFIG_CAMSIG_MINOTAUR is not set
+# CONFIG_GENERIC_BF537_BOARD is not set
+
+#
+# BF537 Specific Configuration
+#
+
+#
+# Interrupt Priority Assignment
+#
+
+#
+# Priority
+#
+CONFIG_IRQ_DMA_ERROR=7
+CONFIG_IRQ_ERROR=7
+CONFIG_IRQ_CAN_RX=11
+CONFIG_IRQ_CAN_TX=11
+CONFIG_IRQ_PROG_INTA=12
+
+#
+# Board customizations
+#
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
+
+#
+# Clock/PLL Setup
+#
+CONFIG_CLKIN_HZ=25000000
+# CONFIG_BFIN_KERNEL_CLOCK is not set
+CONFIG_MAX_MEM_SIZE=32
+CONFIG_MAX_VCO_HZ=600000000
+CONFIG_MIN_VCO_HZ=50000000
+CONFIG_MAX_SCLK_HZ=133333333
+CONFIG_MIN_SCLK_HZ=27000000
+
+#
+# Kernel Timer/Scheduler
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# Misc
+#
+CONFIG_BFIN_SCRATCH_REG_RETN=y
+# CONFIG_BFIN_SCRATCH_REG_RETE is not set
+# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
+
+#
+# Blackfin Kernel Optimizations
+#
+
+#
+# Memory Optimizations
+#
+CONFIG_I_ENTRY_L1=y
+CONFIG_EXCPT_IRQ_SYSC_L1=y
+CONFIG_DO_IRQ_L1=y
+CONFIG_CORE_TIMER_IRQ_L1=y
+CONFIG_IDLE_L1=y
+CONFIG_SCHEDULE_L1=y
+CONFIG_ARITHMETIC_OPS_L1=y
+CONFIG_ACCESS_OK_L1=y
+CONFIG_MEMSET_L1=y
+CONFIG_MEMCPY_L1=y
+CONFIG_SYS_BFIN_SPINLOCK_L1=y
+CONFIG_IP_CHECKSUM_L1=y
+CONFIG_CACHELINE_ALIGNED_L1=y
+CONFIG_SYSCALL_TAB_L1=y
+CONFIG_CPLB_SWITCH_TAB_L1=y
+CONFIG_RAMKERNEL=y
+# CONFIG_ROMKERNEL is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_BFIN_GPTIMERS is not set
+CONFIG_BFIN_DMA_5XX=y
+# CONFIG_DMA_UNCACHED_4M is not set
+# CONFIG_DMA_UNCACHED_2M is not set
+CONFIG_DMA_UNCACHED_1M=y
+# CONFIG_DMA_UNCACHED_NONE is not set
+
+#
+# Cache Support
+#
+CONFIG_BFIN_ICACHE=y
+CONFIG_BFIN_DCACHE=y
+# CONFIG_BFIN_DCACHE_BANKA is not set
+# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
+# CONFIG_MPU is not set
+
+#
+# Asynchonous Memory Configuration
+#
+
+#
+# EBIU_AMGCTL Global Control
+#
+CONFIG_C_AMCKEN=y
+CONFIG_C_CDPRIO=y
+# CONFIG_C_AMBEN is not set
+# CONFIG_C_AMBEN_B0 is not set
+# CONFIG_C_AMBEN_B0_B1 is not set
+# CONFIG_C_AMBEN_B0_B1_B2 is not set
+CONFIG_C_AMBEN_ALL=y
+
+#
+# EBIU_AMBCTL Control
+#
+CONFIG_BANK_0=0x7BB0
+CONFIG_BANK_1=0x7BB0
+CONFIG_BANK_2=0x7BB0
+CONFIG_BANK_3=0xFFC2
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+# CONFIG_PCI is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF_FDPIC=y
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+CONFIG_BINFMT_SHARED_FLAT=y
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_SUSPEND_UP_POSSIBLE=y
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Networking
+#
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_RAM=y
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+CONFIG_MTD_UCLINUX=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_CDROM_PKTCDVD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_AD9960 is not set
+# CONFIG_SPI_ADC_BF533 is not set
+# CONFIG_BF5xx_PPIFCD is not set
+# CONFIG_BFIN_SIMPLE_TIMER is not set
+# CONFIG_BF5xx_PPI is not set
+CONFIG_BFIN_SPORT=y
+# CONFIG_BFIN_TIMER_LATENCY is not set
+# CONFIG_SIMPLE_GPIO is not set
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_DMA=y
+# CONFIG_SERIAL_BFIN_PIO is not set
+CONFIG_SERIAL_BFIN_UART0=y
+# CONFIG_BFIN_UART0_CTSRTS is not set
+CONFIG_SERIAL_BFIN_UART1=y
+# CONFIG_BFIN_UART1_CTSRTS is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_BFIN_SPORT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# CAN, the car bus and industrial fieldbus
+#
+# CONFIG_CAN4LINUX is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BITBANG is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_BFIN_WDT=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_RTC_CLASS is not set
+
+#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_YAFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_INSTRUMENTATION is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_DEBUG_MMRS=y
+CONFIG_DEBUG_HUNT_FOR_ZERO=y
+CONFIG_DEBUG_BFIN_HWTRACE_ON=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
+# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
+# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_CPLB_INFO=y
+CONFIG_ACCESS_CHECK=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITY_NETWORK is not set
+CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/kernel/cplb-mpu/cacheinit.c b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
index 9eecfa403187..a8b712a24c59 100644
--- a/arch/blackfin/kernel/cplb-mpu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
@@ -25,7 +25,7 @@
#include <asm/cplbinit.h>
#if defined(CONFIG_BFIN_ICACHE)
-void bfin_icache_init(void)
+void __init bfin_icache_init(void)
{
unsigned long ctrl;
int i;
@@ -43,7 +43,7 @@ void bfin_icache_init(void)
#endif
#if defined(CONFIG_BFIN_DCACHE)
-void bfin_dcache_init(void)
+void __init bfin_dcache_init(void)
{
unsigned long ctrl;
int i;
diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
index 8a18399f6072..bd0831592c2c 100644
--- a/arch/blackfin/kernel/cplb-nompu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
@@ -25,7 +25,7 @@
#include <asm/cplbinit.h>
#if defined(CONFIG_BFIN_ICACHE)
-void bfin_icache_init(void)
+void __init bfin_icache_init(void)
{
unsigned long *table = icplb_table;
unsigned long ctrl;
@@ -47,7 +47,7 @@ void bfin_icache_init(void)
#endif
#if defined(CONFIG_BFIN_DCACHE)
-void bfin_dcache_init(void)
+void __init bfin_dcache_init(void)
{
unsigned long *table = dcplb_table;
unsigned long ctrl;
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 224e7cc30bc5..728f708d3981 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -164,17 +164,13 @@ static struct cplb_desc cplb_data[] = {
.name = "Asynchronous Memory Banks",
},
{
-#ifdef L2_START
.start = L2_START,
.end = L2_START + L2_LENGTH,
.psize = SIZE_1M,
.attr = SWITCH_T | I_CPLB | D_CPLB,
.i_conf = L2_MEMORY,
.d_conf = L2_MEMORY,
- .valid = 1,
-#else
- .valid = 0,
-#endif
+ .valid = (L2_LENGTH > 0),
.name = "L2 Memory",
},
{
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 23e637eb78da..7a82d10b4ebf 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -52,6 +52,7 @@ EXPORT_SYMBOL(mtd_size);
#endif
char __initdata command_line[COMMAND_LINE_SIZE];
+unsigned int __initdata *__retx;
/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
@@ -131,14 +132,14 @@ void __init bf53x_relocate_l1_mem(void)
dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
l1_data_a_length, l1_data_b_length);
-#ifdef L2_LENGTH
- l2_length = _ebss_l2 - _stext_l2;
- if (l2_length > L2_LENGTH)
- panic("L2 SRAM Overflow\n");
+ if (L2_LENGTH != 0) {
+ l2_length = _ebss_l2 - _stext_l2;
+ if (l2_length > L2_LENGTH)
+ panic("L2 SRAM Overflow\n");
- /* Copy _stext_l2 to _edata_l2 to L2 SRAM */
- dma_memcpy(_stext_l2, _l2_lma_start, l2_length);
-#endif
+ /* Copy _stext_l2 to _edata_l2 to L2 SRAM */
+ dma_memcpy(_stext_l2, _l2_lma_start, l2_length);
+ }
}
/* add_memory_region to memmap */
@@ -738,6 +739,16 @@ void __init setup_arch(char **cmdline_p)
memory_setup();
+ /* Initialize Async memory banks */
+ bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
+ bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
+ bfin_write_EBIU_AMGCTL(AMGCTLVAL);
+#ifdef CONFIG_EBIU_MBSCTLVAL
+ bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
+ bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
+ bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
+#endif
+
cclk = get_cclk();
sclk = get_sclk();
@@ -775,7 +786,11 @@ void __init setup_arch(char **cmdline_p)
bfin_write_SWRST(DOUBLE_FAULT);
if (_bfin_swrst & RESET_DOUBLE)
- printk(KERN_INFO "Recovering from Double Fault event\n");
+ /*
+ * don't decode the address, since you don't know if this
+ * kernel's symbol map is the same as the crashing kernel
+ */
+ printk(KERN_INFO "Recovering from Double Fault event at %pF\n", __retx);
else if (_bfin_swrst & RESET_WDOG)
printk(KERN_INFO "Recovering from Watchdog event\n");
else if (_bfin_swrst & RESET_SOFTWARE)
@@ -1049,7 +1064,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
BFIN_DLINES);
#ifdef CONFIG_BFIN_ICACHE_LOCK
- switch (read_iloc()) {
+ switch ((bfin_read_IMEM_CONTROL() >> 3) & WAYALL_L) {
case WAY0_L:
seq_printf(m, "Way0 Locked-Down\n");
break;
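The double-fault change in setup.c above saves the faulting RETX value and reports it with printk's %pF conversion, which resolves a code address to symbol+offset through kallsyms instead of printing a bare pointer (with the caveat noted in the patch that the recovering kernel's symbol map may differ from the crashed kernel's). A small, hypothetical illustration of the printk usage only; the helper name and argument here are made up, not part of the patch:

#include <linux/kernel.h>

/* %pF prints the address symbolically, e.g. "some_function+0x1a/0x40",
 * using the running kernel's symbol table. */
static void report_double_fault(void *fault_addr)
{
	printk(KERN_INFO "Recovering from Double Fault event at %pF\n", fault_addr);
}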
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index ad922ab91543..9a9d5083acfd 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -567,7 +567,7 @@ bool get_instruction(unsigned short *val, unsigned short *address)
* we don't read something in the async space that can hang forever
*/
if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) ||
-#ifdef L2_START
+#if L2_LENGTH != 0
(addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) ||
#endif
(addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) ||
@@ -601,12 +601,55 @@ bool get_instruction(unsigned short *val, unsigned short *address)
return false;
}
+/*
+ * Decode the instruction if we are printing out the trace, as it
+ * makes things easier to follow, without running it through objdump.
+ * These are the normal change-of-flow instructions which would be
+ * found at the source of the trace buffer.
+ */
+void decode_instruction(unsigned short *address)
+{
+ unsigned short opcode;
+
+ if (get_instruction(&opcode, address)) {
+ if (opcode == 0x0010)
+ printk("RTS");
+ else if (opcode == 0x0011)
+ printk("RTI");
+ else if (opcode == 0x0012)
+ printk("RTX");
+ else if (opcode >= 0x0050 && opcode <= 0x0057)
+ printk("JUMP (P%i)", opcode & 7);
+ else if (opcode >= 0x0060 && opcode <= 0x0067)
+ printk("CALL (P%i)", opcode & 7);
+ else if (opcode >= 0x0070 && opcode <= 0x0077)
+ printk("CALL (PC+P%i)", opcode & 7);
+ else if (opcode >= 0x0080 && opcode <= 0x0087)
+ printk("JUMP (PC+P%i)", opcode & 7);
+ else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
+ printk("IF !CC JUMP");
+ else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
+ printk("IF CC JUMP");
+ else if (opcode >= 0x2000 && opcode <= 0x2fff)
+ printk("JUMP.S");
+ else if (opcode >= 0xe080 && opcode <= 0xe0ff)
+ printk("LSETUP");
+ else if (opcode >= 0xe200 && opcode <= 0xe2ff)
+ printk("JUMP.L");
+ else if (opcode >= 0xe300 && opcode <= 0xe3ff)
+ printk("CALL pcrel");
+ else
+ printk("0x%04x", opcode);
+ }
+
+}
+
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
int tflags, i = 0;
char buf[150];
- unsigned short val = 0, *addr;
+ unsigned short *addr;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
int j, index;
#endif
@@ -615,6 +658,10 @@ void dump_bfin_trace_buffer(void)
printk(KERN_NOTICE "Hardware Trace:\n");
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
+ printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
+#endif
+
if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
decode_address(buf, (unsigned long)bfin_read_TBUF());
@@ -622,45 +669,14 @@ void dump_bfin_trace_buffer(void)
addr = (unsigned short *)bfin_read_TBUF();
decode_address(buf, (unsigned long)addr);
printk(KERN_NOTICE " Source : %s ", buf);
- if (get_instruction(&val, addr)) {
- if (val == 0x0010)
- printk("RTS");
- else if (val == 0x0011)
- printk("RTI");
- else if (val == 0x0012)
- printk("RTX");
- else if (val >= 0x0050 && val <= 0x0057)
- printk("JUMP (P%i)", val & 7);
- else if (val >= 0x0060 && val <= 0x0067)
- printk("CALL (P%i)", val & 7);
- else if (val >= 0x0070 && val <= 0x0077)
- printk("CALL (PC+P%i)", val & 7);
- else if (val >= 0x0080 && val <= 0x0087)
- printk("JUMP (PC+P%i)", val & 7);
- else if ((val >= 0x1000 && val <= 0x13FF) ||
- (val >= 0x1800 && val <= 0x1BFF))
- printk("IF !CC JUMP");
- else if ((val >= 0x1400 && val <= 0x17ff) ||
- (val >= 0x1c00 && val <= 0x1fff))
- printk("IF CC JUMP");
- else if (val >= 0x2000 && val <= 0x2fff)
- printk("JUMP.S");
- else if (val >= 0xe080 && val <= 0xe0ff)
- printk("LSETUP");
- else if (val >= 0xe200 && val <= 0xe2ff)
- printk("JUMP.L");
- else if (val >= 0xe300 && val <= 0xe3ff)
- printk("CALL pcrel");
- else
- printk("0x%04x", val);
- }
+ decode_instruction(addr);
printk("\n");
}
}
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
if (trace_buff_offset)
- index = trace_buff_offset/4 - 1;
+ index = trace_buff_offset / 4;
else
index = EXPAND_LEN;
@@ -672,7 +688,9 @@ void dump_bfin_trace_buffer(void)
if (index < 0 )
index = EXPAND_LEN;
decode_address(buf, software_trace_buff[index]);
- printk(KERN_NOTICE " Source : %s\n", buf);
+ printk(KERN_NOTICE " Source : %s ", buf);
+ decode_instruction((unsigned short *)software_trace_buff[index]);
+ printk("\n");
index -= 1;
if (index < 0)
index = EXPAND_LEN;
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 0896e38d6108..7d12c6692a65 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -83,6 +83,7 @@ SECTIONS
#if !L1_DATA_B_LENGTH
*(.l1.bss.B)
#endif
+ . = ALIGN(4);
___bss_stop = .;
}
@@ -101,7 +102,7 @@ SECTIONS
#if !L1_DATA_B_LENGTH
*(.l1.data.B)
#endif
-#ifndef L2_LENGTH
+#if !L2_LENGTH
. = ALIGN(32);
*(.data_l2.cacheline_aligned)
*(.l2.data)
@@ -211,20 +212,19 @@ SECTIONS
__ebss_b_l1 = .;
}
-#ifdef L2_LENGTH
__l2_lma_start = .;
.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
{
. = ALIGN(4);
__stext_l2 = .;
- *(.l1.text)
+ *(.l2.text)
. = ALIGN(4);
__etext_l2 = .;
. = ALIGN(4);
__sdata_l2 = .;
- *(.l1.data)
+ *(.l2.data)
__edata_l2 = .;
. = ALIGN(32);
@@ -232,11 +232,10 @@ SECTIONS
. = ALIGN(4);
__sbss_l2 = .;
- *(.l1.bss)
+ *(.l2.bss)
. = ALIGN(4);
__ebss_l2 = .;
}
-#endif
/* Force trailing alignment of our init section so that when we
* free our init memory, we don't leave behind a partial page.
diff --git a/arch/blackfin/lib/ins.S b/arch/blackfin/lib/ins.S
index eba2343b1b59..d60554dce87b 100644
--- a/arch/blackfin/lib/ins.S
+++ b/arch/blackfin/lib/ins.S
@@ -33,7 +33,28 @@
.align 2
+/*
+ * Reads on the Blackfin are speculative. In Blackfin terms, this means they
+ * can be interrupted at any time (even after they have been issued onto the
+ * external bus) and re-issued after the interrupt occurs.
+ *
+ * If a FIFO is sitting on the end of the read, it may see two reads,
+ * while the core only sees one. The FIFO receives the read which is
+ * cancelled and not delivered to the core.
+ *
+ * To solve this, interrupts are turned off before reads to I/O space occur.
+ * There are three versions of each of these functions:
+ * - one turns interrupts off around every read (higher overhead, but lower
+ *   interrupt latency)
+ * - one turns interrupts off around the whole loop (lower overhead, but
+ *   interrupts stay masked for longer)
+ * - the DMA versions, which do not suffer from this issue at all. They have
+ *   different names (prefixed by dma_) and are located in
+ *   ../kernel/bfin_dma_5xx.c
+ * Using the DMA-related functions is recommended for transferring large
+ * buffers in/out of FIFOs.
+ */
+
ENTRY(_insl)
+#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
cli R3;
P1 = R1; /* P1 = address */
@@ -46,9 +67,26 @@ ENTRY(_insl)
.Llong_loop_e: NOP;
sti R3;
RTS;
+#else
+ P0 = R0; /* P0 = port */
+ P1 = R1; /* P1 = address */
+ P2 = R2; /* P2 = count */
+ SSYNC;
+ LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
+.Llong_loop_s:
+ CLI R3;
+ NOP; NOP; NOP;
+ R0 = [P0];
+ [P1++] = R0;
+.Llong_loop_e:
+ STI R3;
+
+ RTS;
+#endif
ENDPROC(_insl)
ENTRY(_insw)
+#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
cli R3;
P1 = R1; /* P1 = address */
@@ -61,9 +99,26 @@ ENTRY(_insw)
.Lword_loop_e: NOP;
sti R3;
RTS;
+#else
+ P0 = R0; /* P0 = port */
+ P1 = R1; /* P1 = address */
+ P2 = R2; /* P2 = count */
+ SSYNC;
+ LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
+.Lword_loop_s:
+ CLI R3;
+ NOP; NOP; NOP;
+ R0 = W[P0];
+ W[P1++] = R0;
+.Lword_loop_e:
+ STI R3;
+ RTS;
+
+#endif
ENDPROC(_insw)
ENTRY(_insw_8)
+#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
cli R3;
P1 = R1; /* P1 = address */
@@ -78,9 +133,29 @@ ENTRY(_insw_8)
.Lword8_loop_e: NOP;
sti R3;
RTS;
+#else
+ P0 = R0; /* P0 = port */
+ P1 = R1; /* P1 = address */
+ P2 = R2; /* P2 = count */
+ SSYNC;
+ LSETUP( .Lword8_loop_s, .Lword8_loop_e) LC0 = P2;
+.Lword8_loop_s:
+ CLI R3;
+ NOP; NOP; NOP;
+ R0 = W[P0];
+ B[P1++] = R0;
+ R0 = R0 >> 8;
+ B[P1++] = R0;
+ NOP;
+.Lword8_loop_e:
+ STI R3;
+
+ RTS;
+#endif
ENDPROC(_insw_8)
ENTRY(_insb)
+#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
cli R3;
P1 = R1; /* P1 = address */
@@ -93,9 +168,26 @@ ENTRY(_insb)
.Lbyte_loop_e: NOP;
sti R3;
RTS;
+#else
+ P0 = R0; /* P0 = port */
+ P1 = R1; /* P1 = address */
+ P2 = R2; /* P2 = count */
+ SSYNC;
+ LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
+.Lbyte_loop_s:
+ CLI R3;
+ NOP; NOP; NOP;
+ R0 = B[P0];
+ B[P1++] = R0;
+.Lbyte_loop_e:
+ STI R3;
+
+ RTS;
+#endif
ENDPROC(_insb)
ENTRY(_insl_16)
+#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
cli R3;
P1 = R1; /* P1 = address */
@@ -110,4 +202,21 @@ ENTRY(_insl_16)
.Llong16_loop_e: NOP;
sti R3;
RTS;
+#else
+ P0 = R0; /* P0 = port */
+ P1 = R1; /* P1 = address */
+ P2 = R2; /* P2 = count */
+ SSYNC;
+ LSETUP( .Llong16_loop_s, .Llong16_loop_e) LC0 = P2;
+.Llong16_loop_s:
+ CLI R3;
+ NOP; NOP; NOP;
+ R0 = [P0];
+ W[P1++] = R0;
+ R0 = R0 >> 16;
+ W[P1++] = R0;
+.Llong16_loop_e:
+ STI R3;
+ RTS;
+#endif
ENDPROC(_insl_16)
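The comment at the top of lib/ins.S above explains the speculative-read anomaly and the per-read vs. per-loop interrupt masking trade-off, but callers are unaffected by which variant gets built. A minimal usage sketch, assuming a memory-mapped 16-bit FIFO on an async bank (the port address and word count are made up for illustration):

#include <linux/io.h>
#include <linux/types.h>

#define FIFO_PORT	0x20300000UL	/* hypothetical FIFO address */
#define FIFO_WORDS	64		/* hypothetical burst size */

static void drain_fifo(u16 *buf)
{
	/* one call -> one LSETUP loop in lib/ins.S; whether interrupts are
	 * masked around each read or around the whole loop is selected by
	 * CONFIG_BFIN_INS_LOWOVERHEAD at build time */
	insw(FIFO_PORT, buf, FIFO_WORDS);
}

For large transfers the comment above recommends the dma_-prefixed variants in kernel/bfin_dma_5xx.c instead, since they sidestep the CLI/STI cost altogether.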
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 0b26ae2de5ee..d22bc7773717 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -39,7 +39,6 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/pata_platform.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -160,15 +159,15 @@ static struct platform_device musb_device = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition ezkit_partitions[] = {
{
- .name = "Bootloader",
+ .name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
- .name = "Kernel",
+ .name = "linux kernel(nor)",
.size = 0x1C0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "RootFS",
+ .name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -200,12 +199,12 @@ static struct platform_device ezkit_flash_device = {
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
static struct mtd_partition partition_info[] = {
{
- .name = "Linux Kernel",
+ .name = "linux kernel(nand)",
.offset = 0,
.size = 4 * SIZE_1M,
},
{
- .name = "File System",
+ .name = "file system(nand)",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
@@ -438,12 +437,12 @@ static struct platform_device net2272_bfin_device = {
|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "linux kernel",
+ .name = "linux kernel(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -799,43 +798,6 @@ static struct platform_device bfin_sport1_uart_device = {
};
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 55
-
-static struct pata_platform_info bfin_pata_platform_data = {
- .ioport_shift = 1,
- .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
-};
-
-static struct resource bfin_pata_resources[] = {
- {
- .start = 0x20314020,
- .end = 0x2031403F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x2031401C,
- .end = 0x2031401F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = PATA_INT,
- .end = PATA_INT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device bfin_pata_device = {
- .name = "pata_platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(bfin_pata_resources),
- .resource = bfin_pata_resources,
- .dev = {
- .platform_data = &bfin_pata_platform_data,
- }
-};
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>
@@ -961,10 +923,6 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_sport1_uart_device,
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- &bfin_pata_device,
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
@@ -987,10 +945,6 @@ static int __init stamp_init(void)
platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
-
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
-#endif
return 0;
}
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 689b69c98ee4..762f754c06cc 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -38,7 +38,6 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/ata_platform.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -177,15 +176,15 @@ static struct platform_device bf52x_t350mcqb_device = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition ezkit_partitions[] = {
{
- .name = "Bootloader",
+ .name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
- .name = "Kernel",
+ .name = "linux kernel(nor)",
.size = 0x1C0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "RootFS",
+ .name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -217,12 +216,12 @@ static struct platform_device ezkit_flash_device = {
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
static struct mtd_partition partition_info[] = {
{
- .name = "Linux Kernel",
+ .name = "linux kernel(nand)",
.offset = 0,
.size = 4 * SIZE_1M,
},
{
- .name = "File System",
+ .name = "file system(nand)",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
@@ -460,12 +459,12 @@ static struct platform_device net2272_bfin_device = {
|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "linux kernel",
+ .name = "linux kernel(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -825,43 +824,6 @@ static struct platform_device bfin_sport1_uart_device = {
};
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 55
-
-static struct pata_platform_info bfin_pata_platform_data = {
- .ioport_shift = 1,
- .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
-};
-
-static struct resource bfin_pata_resources[] = {
- {
- .start = 0x20314020,
- .end = 0x2031403F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x2031401C,
- .end = 0x2031401F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = PATA_INT,
- .end = PATA_INT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device bfin_pata_device = {
- .name = "pata_platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(bfin_pata_resources),
- .resource = bfin_pata_resources,
- .dev = {
- .platform_data = &bfin_pata_platform_data,
- }
-};
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>
@@ -996,10 +958,6 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_sport1_uart_device,
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- &bfin_pata_device,
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
@@ -1022,10 +980,6 @@ static int __init stamp_init(void)
platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
-
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
-#endif
return 0;
}
diff --git a/arch/blackfin/mach-bf527/head.S b/arch/blackfin/mach-bf527/head.S
index fe05cc1ef174..c3334cc5bcb7 100644
--- a/arch/blackfin/mach-bf527/head.S
+++ b/arch/blackfin/mach-bf527/head.S
@@ -30,293 +30,11 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
-#include <asm/trace.h>
-
#ifdef CONFIG_BFIN_KERNEL_CLOCK
#include <asm/mach-common/clocks.h>
#include <asm/mach/mem_init.h>
#endif
-.extern ___bss_stop
-.extern ___bss_start
-.extern _bf53x_relocate_l1_mem
-
-#define INITIAL_STACK 0xFFB01000
-
-__INIT
-
-ENTRY(__start)
- /* R0: argument of command line string, passed from uboot, save it */
- R7 = R0;
- /* Enable Cycle Counter and Nesting Of Interrupts */
-#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
- R0 = SYSCFG_SNEN;
-#else
- R0 = SYSCFG_SNEN | SYSCFG_CCEN;
-#endif
- SYSCFG = R0;
- R0 = 0;
-
- /* Clear Out All the data and pointer Registers */
- R1 = R0;
- R2 = R0;
- R3 = R0;
- R4 = R0;
- R5 = R0;
- R6 = R0;
-
- P0 = R0;
- P1 = R0;
- P2 = R0;
- P3 = R0;
- P4 = R0;
- P5 = R0;
-
- LC0 = r0;
- LC1 = r0;
- L0 = r0;
- L1 = r0;
- L2 = r0;
- L3 = r0;
-
- /* Clear Out All the DAG Registers */
- B0 = r0;
- B1 = r0;
- B2 = r0;
- B3 = r0;
-
- I0 = r0;
- I1 = r0;
- I2 = r0;
- I3 = r0;
-
- M0 = r0;
- M1 = r0;
- M2 = r0;
- M3 = r0;
-
- trace_buffer_init(p0,r0);
- P0 = R1;
- R0 = R1;
-
- /* Turn off the icache */
- p0.l = LO(IMEM_CONTROL);
- p0.h = HI(IMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENICPLB;
- R0 = R0 & R1;
-
- /* Anomaly 05000125 */
-#if ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
- [p0] = R0;
- SSYNC;
-#if ANOMALY_05000125
- STI R2;
-#endif
-
- /* Turn off the dcache */
- p0.l = LO(DMEM_CONTROL);
- p0.h = HI(DMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENDCPLB;
- R0 = R0 & R1;
-
- /* Anomaly 05000125 */
-#if ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
- [p0] = R0;
- SSYNC;
-#if ANOMALY_05000125
- STI R2;
-#endif
-
-
-#if defined(CONFIG_BF527)
- p0.h = hi(EMAC_SYSTAT);
- p0.l = lo(EMAC_SYSTAT);
- R0.h = 0xFFFF; /* Clear EMAC Interrupt Status bits */
- R0.l = 0xFFFF;
- [P0] = R0;
- SSYNC;
-#endif
-
- /* Initialise UART - when booting from u-boot, the UART is not disabled
- * so if we dont initalize here, our serial console gets hosed */
- p0.h = hi(UART1_LCR);
- p0.l = lo(UART1_LCR);
- r0 = 0x0(Z);
- w[p0] = r0.L; /* To enable DLL writes */
- ssync;
-
- p0.h = hi(UART1_DLL);
- p0.l = lo(UART1_DLL);
- r0 = 0x0(Z);
- w[p0] = r0.L;
- ssync;
-
- p0.h = hi(UART1_DLH);
- p0.l = lo(UART1_DLH);
- r0 = 0x00(Z);
- w[p0] = r0.L;
- ssync;
-
- p0.h = hi(UART1_GCTL);
- p0.l = lo(UART1_GCTL);
- r0 = 0x0(Z);
- w[p0] = r0.L; /* To enable UART clock */
- ssync;
-
- /* Initialize stack pointer */
- sp.l = lo(INITIAL_STACK);
- sp.h = hi(INITIAL_STACK);
- fp = sp;
- usp = sp;
-
-#ifdef CONFIG_EARLY_PRINTK
- SP += -12;
- call _init_early_exception_vectors;
- SP += 12;
-#endif
-
- /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
- call _bf53x_relocate_l1_mem;
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
- call _start_dma_code;
-#endif
-
- /* Code for initializing Async memory banks */
-
- p2.h = hi(EBIU_AMBCTL1);
- p2.l = lo(EBIU_AMBCTL1);
- r0.h = hi(AMBCTL1VAL);
- r0.l = lo(AMBCTL1VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMBCTL0);
- p2.l = lo(EBIU_AMBCTL0);
- r0.h = hi(AMBCTL0VAL);
- r0.l = lo(AMBCTL0VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMGCTL);
- p2.l = lo(EBIU_AMGCTL);
- r0 = AMGCTLVAL;
- w[p2] = r0;
- ssync;
-
- /* This section keeps the processor in supervisor mode
- * during kernel boot. Switches to user mode at end of boot.
- * See page 3-9 of Hardware Reference manual for documentation.
- */
-
- /* EVT15 = _real_start */
-
- p0.l = lo(EVT15);
- p0.h = hi(EVT15);
- p1.l = _real_start;
- p1.h = _real_start;
- [p0] = p1;
- csync;
-
- p0.l = lo(IMASK);
- p0.h = hi(IMASK);
- p1.l = IMASK_IVG15;
- p1.h = 0x0;
- [p0] = p1;
- csync;
-
- raise 15;
- p0.l = .LWAIT_HERE;
- p0.h = .LWAIT_HERE;
- reti = p0;
-#if ANOMALY_05000281
- nop; nop; nop;
-#endif
- rti;
-
-.LWAIT_HERE:
- jump .LWAIT_HERE;
-ENDPROC(__start)
-
-ENTRY(_real_start)
- [ -- sp ] = reti;
- p0.l = lo(WDOG_CTL);
- p0.h = hi(WDOG_CTL);
- r0 = 0xAD6(z);
- w[p0] = r0; /* watchdog off for now */
- ssync;
-
- /* Code update for BSS size == 0
- * Zero out the bss region.
- */
-
- p1.l = ___bss_start;
- p1.h = ___bss_start;
- p2.l = ___bss_stop;
- p2.h = ___bss_stop;
- r0 = 0;
- p2 -= p1;
- lsetup (.L_clear_bss, .L_clear_bss) lc0 = p2;
-.L_clear_bss:
- B[p1++] = r0;
-
- /* In case there is a NULL pointer reference
- * Zero out region before stext
- */
-
- p1.l = 0x0;
- p1.h = 0x0;
- r0.l = __stext;
- r0.h = __stext;
- r0 = r0 >> 1;
- p2 = r0;
- r0 = 0;
- lsetup (.L_clear_zero, .L_clear_zero) lc0 = p2;
-.L_clear_zero:
- W[p1++] = r0;
-
- /* pass the uboot arguments to the global value command line */
- R0 = R7;
- call _cmdline_init;
-
- p1.l = __rambase;
- p1.h = __rambase;
- r0.l = __sdata;
- r0.h = __sdata;
- [p1] = r0;
-
- p1.l = __ramstart;
- p1.h = __ramstart;
- p3.l = ___bss_stop;
- p3.h = ___bss_stop;
-
- r1 = p3;
- [p1] = r1;
-
- /*
- * load the current thread pointer and stack
- */
- r1.l = _init_thread_union;
- r1.h = _init_thread_union;
-
- r2.l = 0x2000;
- r2.h = 0x0000;
- r1 = r1 + r2;
- sp = r1;
- usp = sp;
- fp = sp;
- jump.l _start_kernel;
-ENDPROC(_real_start)
-
-__FINIT
-
.section .l1.text
#ifdef CONFIG_BFIN_KERNEL_CLOCK
ENTRY(_start_dma_code)
@@ -420,13 +138,6 @@ ENTRY(_start_dma_code)
[P2] = R1;
SSYNC;
- p0.h = hi(SIC_IWR0);
- p0.l = lo(SIC_IWR0);
- r0.l = lo(IWR_ENABLE_ALL);
- r0.h = hi(IWR_ENABLE_ALL);
- [p0] = r0;
- SSYNC;
-
RTS;
ENDPROC(_start_dma_code)
#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf527/ints-priority.c b/arch/blackfin/mach-bf527/ints-priority.c
index 1fa389793968..8a2367403d2b 100644
--- a/arch/blackfin/mach-bf527/ints-priority.c
+++ b/arch/blackfin/mach-bf527/ints-priority.c
@@ -31,7 +31,7 @@
#include <linux/irq.h>
#include <asm/blackfin.h>
-void program_IAR(void)
+void __init program_IAR(void)
{
/* Program the IAR0 Register with the configured priority */
bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) |
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 4103a97c1a70..c66a68f30239 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -38,7 +38,6 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
@@ -141,16 +140,16 @@ static struct platform_device net2272_bfin_device = {
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00060000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0x100000,
.offset = 0x60000
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = 0x6a0000,
.offset = 0x00160000,
}
diff --git a/arch/blackfin/mach-bf533/boards/Kconfig b/arch/blackfin/mach-bf533/boards/Kconfig
index 840059241fbe..308c98dc5aba 100644
--- a/arch/blackfin/mach-bf533/boards/Kconfig
+++ b/arch/blackfin/mach-bf533/boards/Kconfig
@@ -14,6 +14,12 @@ config BFIN533_STAMP
help
BF533-STAMP board support.
+config BLACKSTAMP
+ bool "BlackStamp"
+ help
+ Support for the BlackStamp board. Hardware info available at
+ http://blackfin.uclinux.org/gf/project/blackstamp/
+
config BFIN533_BLUETECHNIX_CM
bool "Bluetechnix CM-BF533"
depends on (BF533)
diff --git a/arch/blackfin/mach-bf533/boards/Makefile b/arch/blackfin/mach-bf533/boards/Makefile
index b7a1a1d79bda..9afbe72b484f 100644
--- a/arch/blackfin/mach-bf533/boards/Makefile
+++ b/arch/blackfin/mach-bf533/boards/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_BFIN533_STAMP) += stamp.o
obj-$(CONFIG_BFIN532_IP0X) += ip0x.o
obj-$(CONFIG_BFIN533_EZKIT) += ezkit.o
obj-$(CONFIG_BFIN533_BLUETECHNIX_CM) += cm_bf533.o
+obj-$(CONFIG_BLACKSTAMP) += blackstamp.o
obj-$(CONFIG_H8606_HVSISTEMAS) += H8606.o
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
new file mode 100644
index 000000000000..d064ded87719
--- /dev/null
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -0,0 +1,401 @@
+/*
+ * File: arch/blackfin/mach-bf533/boards/blackstamp.c
+ * Based on: arch/blackfin/mach-bf533/boards/stamp.c
+ * Author: Benjamin Matthews <bmat@lle.rochester.edu>
+ * Aidan Williams <aidan@nicta.com.au>
+ *
+ * Created: 2008
+ * Description: Board Info File for the BlackStamp
+ *
+ * Copyright 2005 National ICT Australia (NICTA)
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * More info about the BlackStamp at:
+ * http://blackfin.uclinux.org/gf/project/blackstamp/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <asm/dma.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/portmux.h>
+#include <asm/dpmc.h>
+
+/*
+ * Name the Board for the /proc/cpuinfo
+ */
+const char bfin_board_name[] = "BlackStamp";
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+static struct platform_device rtc_device = {
+ .name = "rtc-bfin",
+ .id = -1,
+};
+#endif
+
+/*
+ * Driver needs to know address, irq and flag pin.
+ */
+#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+static struct resource smc91x_resources[] = {
+ {
+ .name = "smc91x-regs",
+ .start = 0x20300300,
+ .end = 0x20300300 + 16,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_PF3,
+ .end = IRQ_PF3,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+#endif
+
+#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+static struct mtd_partition bfin_spi_flash_partitions[] = {
+ {
+ .name = "bootloader(spi)",
+ .size = 0x00040000,
+ .offset = 0,
+ .mask_flags = MTD_CAP_ROM
+ }, {
+ .name = "linux kernel(spi)",
+ .size = 0x180000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "file system(spi)",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static struct flash_platform_data bfin_spi_flash_data = {
+ .name = "m25p80",
+ .parts = bfin_spi_flash_partitions,
+ .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
+ .type = "m25p64",
+};
+
+/* SPI flash chip (m25p64) */
+static struct bfin5xx_spi_chip spi_flash_chip_info = {
+ .enable_dma = 0, /* don't use dma transfer with this chip */
+ .bits_per_word = 8,
+};
+#endif
+
+#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+static struct bfin5xx_spi_chip spi_mmc_chip_info = {
+ .enable_dma = 1,
+ .bits_per_word = 8,
+};
+#endif
+
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+static struct bfin5xx_spi_chip spidev_chip_info = {
+ .enable_dma = 0,
+ .bits_per_word = 8,
+};
+#endif
+
+static struct spi_board_info bfin_spi_board_info[] __initdata = {
+#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+ {
+ /* the modalias must be the same as spi device driver name */
+ .modalias = "m25p80", /* Name of spi_driver for this device */
+ .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0, /* Framework bus number */
+ .chip_select = 2, /* Framework chip select. */
+ .platform_data = &bfin_spi_flash_data,
+ .controller_data = &spi_flash_chip_info,
+ .mode = SPI_MODE_3,
+ },
+#endif
+
+#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+ {
+ .modalias = "spi_mmc_dummy",
+ .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = 0,
+ .platform_data = NULL,
+ .controller_data = &spi_mmc_chip_info,
+ .mode = SPI_MODE_3,
+ },
+ {
+ .modalias = "spi_mmc",
+ .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = CONFIG_SPI_MMC_CS_CHAN,
+ .platform_data = NULL,
+ .controller_data = &spi_mmc_chip_info,
+ .mode = SPI_MODE_3,
+ },
+#endif
+
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+ {
+ .modalias = "spidev",
+ .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = 7,
+ .controller_data = &spidev_chip_info,
+ },
+#endif
+};
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+/* SPI (0) */
+static struct resource bfin_spi0_resource[] = {
+ [0] = {
+ .start = SPI0_REGBASE,
+ .end = SPI0_REGBASE + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = CH_SPI,
+ .end = CH_SPI,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+/* SPI controller data */
+static struct bfin5xx_spi_master bfin_spi0_info = {
+ .num_chipselect = 8,
+ .enable_dma = 1, /* master has the ability to do dma transfer */
+ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct platform_device bfin_spi0_device = {
+ .name = "bfin-spi",
+ .id = 0, /* Bus number */
+ .num_resources = ARRAY_SIZE(bfin_spi0_resource),
+ .resource = bfin_spi0_resource,
+ .dev = {
+ .platform_data = &bfin_spi0_info, /* Passed to driver */
+ },
+};
+#endif /* spi master and devices */
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+static struct resource bfin_uart_resources[] = {
+ {
+ .start = 0xFFC00400,
+ .end = 0xFFC004FF,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device bfin_uart_device = {
+ .name = "bfin-uart",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(bfin_uart_resources),
+ .resource = bfin_uart_resources,
+};
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+static struct resource bfin_sir_resources[] = {
+#ifdef CONFIG_BFIN_SIR0
+ {
+ .start = 0xFFC00400,
+ .end = 0xFFC004FF,
+ .flags = IORESOURCE_MEM,
+ },
+#endif
+};
+
+static struct platform_device bfin_sir_device = {
+ .name = "bfin_sir",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bfin_sir_resources),
+ .resource = bfin_sir_resources,
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+static struct platform_device bfin_sport0_uart_device = {
+ .name = "bfin-sport-uart",
+ .id = 0,
+};
+
+static struct platform_device bfin_sport1_uart_device = {
+ .name = "bfin-sport-uart",
+ .id = 1,
+};
+#endif
+
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+static struct gpio_keys_button bfin_gpio_keys_table[] = {
+ {BTN_0, GPIO_PF4, 0, "gpio-keys: BTN0"},
+ {BTN_1, GPIO_PF5, 0, "gpio-keys: BTN1"},
+ {BTN_2, GPIO_PF6, 0, "gpio-keys: BTN2"},
+}; /* Mapped to the first three PF Test Points */
+
+static struct gpio_keys_platform_data bfin_gpio_keys_data = {
+ .buttons = bfin_gpio_keys_table,
+ .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
+};
+
+static struct platform_device bfin_device_gpiokeys = {
+ .name = "gpio-keys",
+ .dev = {
+ .platform_data = &bfin_gpio_keys_data,
+ },
+};
+#endif
+
+static struct resource bfin_gpios_resources = {
+ .start = 0,
+ .end = MAX_BLACKFIN_GPIOS - 1,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+ .name = "simple-gpio",
+ .id = -1,
+ .num_resources = 1,
+ .resource = &bfin_gpios_resources,
+};
+
+#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
+#include <linux/i2c-gpio.h>
+
+static struct i2c_gpio_platform_data i2c_gpio_data = {
+ .sda_pin = 8,
+ .scl_pin = 9,
+ .sda_is_open_drain = 0,
+ .scl_is_open_drain = 0,
+ .udelay = 40,
+}; /* This hasn't actually been used; these pins
+ * are (currently) free pins on the expansion connector */
+
+static struct platform_device i2c_gpio_device = {
+ .name = "i2c-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &i2c_gpio_data,
+ },
+};
+#endif
+
+#ifdef CONFIG_I2C_BOARDINFO
+static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
+};
+#endif
+
+static const unsigned int cclk_vlev_datasheet[] =
+{
+ VRPAIR(VLEV_085, 250000000),
+ VRPAIR(VLEV_090, 376000000),
+ VRPAIR(VLEV_095, 426000000),
+ VRPAIR(VLEV_100, 426000000),
+ VRPAIR(VLEV_105, 476000000),
+ VRPAIR(VLEV_110, 476000000),
+ VRPAIR(VLEV_115, 476000000),
+ VRPAIR(VLEV_120, 600000000),
+ VRPAIR(VLEV_125, 600000000),
+ VRPAIR(VLEV_130, 600000000),
+};
+
+static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
+ .tuple_tab = cclk_vlev_datasheet,
+ .tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
+ .vr_settling_time = 25 /* us */,
+};
+
+static struct platform_device bfin_dpmc = {
+ .name = "bfin dpmc",
+ .dev = {
+ .platform_data = &bfin_dmpc_vreg_data,
+ },
+};
+
+static struct platform_device *stamp_devices[] __initdata = {
+
+ &bfin_dpmc,
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+ &rtc_device,
+#endif
+
+#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+ &smc91x_device,
+#endif
+
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+ &bfin_spi0_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+ &bfin_uart_device,
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+ &bfin_sir_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+ &bfin_sport0_uart_device,
+ &bfin_sport1_uart_device,
+#endif
+
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+ &bfin_device_gpiokeys,
+#endif
+
+#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
+ &i2c_gpio_device,
+#endif
+
+ &bfin_gpios_device,
+};
+
+static int __init blackstamp_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "%s(): registering device resources\n", __func__);
+
+#ifdef CONFIG_I2C_BOARDINFO
+ i2c_register_board_info(0, bfin_i2c_board_info,
+ ARRAY_SIZE(bfin_i2c_board_info));
+#endif
+
+ ret = platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
+ if (ret < 0)
+ return ret;
+
+#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+ /* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */
+ bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0);
+ bfin_write_FIO_FLAG_S(PF0);
+ SSYNC();
+#endif
+
+ spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+ return 0;
+}
+
+arch_initcall(blackstamp_init);
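A hedged sketch (editorial illustration, not part of the patch): bfin_i2c_board_info[] above is registered while empty, so nothing is attached to the bit-banged i2c-gpio bus yet. A board that wired, say, a PCF8574 expander to those SDA/SCL pins could populate the table before i2c_register_board_info() runs in blackstamp_init(); the chip type and 7-bit address below are assumptions:

#include <linux/i2c.h>

static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
	{
		/* hypothetical expander on bus 0 (the i2c-gpio adapter
		 * registered above with .id = 0) */
		I2C_BOARD_INFO("pcf8574", 0x20),
	},
};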
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index ed2b0b8f5dc9..575843f6d9ef 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -36,7 +36,6 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
@@ -53,16 +52,16 @@ const char bfin_board_name[] = "Bluetechnix CM BF533";
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0xe0000,
.offset = 0x20000
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = 0x700000,
.offset = 0x00100000,
}
@@ -307,43 +306,6 @@ static struct platform_device isp1362_hcd_device = {
};
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 38
-
-static struct pata_platform_info bfin_pata_platform_data = {
- .ioport_shift = 2,
- .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
-};
-
-static struct resource bfin_pata_resources[] = {
- {
- .start = 0x2030C000,
- .end = 0x2030C01F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x2030D018,
- .end = 0x2030D01B,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = PATA_INT,
- .end = PATA_INT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device bfin_pata_device = {
- .name = "pata_platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(bfin_pata_resources),
- .resource = bfin_pata_resources,
- .dev = {
- .platform_data = &bfin_pata_platform_data,
- }
-};
-#endif
-
static const unsigned int cclk_vlev_datasheet[] =
{
VRPAIR(VLEV_085, 250000000),
@@ -403,10 +365,6 @@ static struct platform_device *cm_bf533_devices[] __initdata = {
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
&bfin_spi0_device,
#endif
-
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- &bfin_pata_device,
-#endif
};
static int __init cm_bf533_init(void)
@@ -416,10 +374,6 @@ static int __init cm_bf533_init(void)
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
-
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
-#endif
return 0;
}
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 079389cbd859..cc2e7eeb1d5a 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -37,7 +37,6 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
@@ -90,16 +89,16 @@ static struct platform_device smc91x_device = {
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0xe0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -255,43 +254,6 @@ static struct platform_device bfin_sir_device = {
};
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 55
-
-static struct pata_platform_info bfin_pata_platform_data = {
- .ioport_shift = 1,
- .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
-};
-
-static struct resource bfin_pata_resources[] = {
- {
- .start = 0x20314020,
- .end = 0x2031403F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x2031401C,
- .end = 0x2031401F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = PATA_INT,
- .end = PATA_INT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device bfin_pata_device = {
- .name = "pata_platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(bfin_pata_resources),
- .resource = bfin_pata_resources,
- .dev = {
- .platform_data = &bfin_pata_platform_data,
- }
-};
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>
@@ -404,10 +366,6 @@ static struct platform_device *ezkit_devices[] __initdata = {
&bfin_sir_device,
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- &bfin_pata_device,
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
@@ -424,10 +382,6 @@ static int __init ezkit_init(void)
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
-
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
-#endif
return 0;
}
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 13ae49515f73..050ffca53530 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -38,7 +38,6 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <asm/dma.h>
@@ -114,15 +113,15 @@ static struct platform_device net2272_bfin_device = {
#if defined(CONFIG_MTD_BFIN_ASYNC) || defined(CONFIG_MTD_BFIN_ASYNC_MODULE)
static struct mtd_partition stamp_partitions[] = {
{
- .name = "Bootloader",
+ .name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
- .name = "Kernel",
+ .name = "linux kernel(nor)",
.size = 0xE0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "RootFS",
+ .name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -164,16 +163,16 @@ static struct platform_device stamp_flash_device = {
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0xe0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -404,43 +403,6 @@ static struct platform_device bfin_sport1_uart_device = {
};
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 55
-
-static struct pata_platform_info bfin_pata_platform_data = {
- .ioport_shift = 1,
- .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
-};
-
-static struct resource bfin_pata_resources[] = {
- {
- .start = 0x20314020,
- .end = 0x2031403F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x2031401C,
- .end = 0x2031401F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = PATA_INT,
- .end = PATA_INT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device bfin_pata_device = {
- .name = "pata_platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(bfin_pata_resources),
- .resource = bfin_pata_resources,
- .dev = {
- .platform_data = &bfin_pata_platform_data,
- }
-};
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>
@@ -583,10 +545,6 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_sport1_uart_device,
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- &bfin_pata_device,
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
@@ -625,10 +583,6 @@ static int __init stamp_init(void)
#endif
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
-
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
-#endif
return 0;
}
diff --git a/arch/blackfin/mach-bf533/head.S b/arch/blackfin/mach-bf533/head.S
index c671e8549b17..d59db86195b6 100644
--- a/arch/blackfin/mach-bf533/head.S
+++ b/arch/blackfin/mach-bf533/head.S
@@ -30,294 +30,11 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
-#include <asm/trace.h>
#ifdef CONFIG_BFIN_KERNEL_CLOCK
#include <asm/mach-common/clocks.h>
#include <asm/mach/mem_init.h>
#endif
-.extern ___bss_stop
-.extern ___bss_start
-.extern _bf53x_relocate_l1_mem
-
-#define INITIAL_STACK 0xFFB01000
-
-__INIT
-
-ENTRY(__start)
- /* R0: argument of command line string, passed from uboot, save it */
- R7 = R0;
- /* Enable Cycle Counter and Nesting Of Interrupts */
-#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
- R0 = SYSCFG_SNEN;
-#else
- R0 = SYSCFG_SNEN | SYSCFG_CCEN;
-#endif
- SYSCFG = R0;
- R0 = 0;
-
- /* Clear Out All the data and pointer Registers */
- R1 = R0;
- R2 = R0;
- R3 = R0;
- R4 = R0;
- R5 = R0;
- R6 = R0;
-
- P0 = R0;
- P1 = R0;
- P2 = R0;
- P3 = R0;
- P4 = R0;
- P5 = R0;
-
- LC0 = r0;
- LC1 = r0;
- L0 = r0;
- L1 = r0;
- L2 = r0;
- L3 = r0;
-
- /* Clear Out All the DAG Registers */
- B0 = r0;
- B1 = r0;
- B2 = r0;
- B3 = r0;
-
- I0 = r0;
- I1 = r0;
- I2 = r0;
- I3 = r0;
-
- M0 = r0;
- M1 = r0;
- M2 = r0;
- M3 = r0;
-
- trace_buffer_init(p0,r0);
- P0 = R1;
- R0 = R1;
-
- p0.h = hi(FIO_MASKA_C);
- p0.l = lo(FIO_MASKA_C);
- r0 = 0xFFFF(Z);
- w[p0] = r0.L; /* Disable all interrupts */
- ssync;
-
- p0.h = hi(FIO_MASKB_C);
- p0.l = lo(FIO_MASKB_C);
- r0 = 0xFFFF(Z);
- w[p0] = r0.L; /* Disable all interrupts */
- ssync;
-
- /* Turn off the icache */
- p0.l = LO(IMEM_CONTROL);
- p0.h = HI(IMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENICPLB;
- R0 = R0 & R1;
-
- /* Anomaly 05000125 */
-#if ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
- [p0] = R0;
- SSYNC;
-#if ANOMALY_05000125
- STI R2;
-#endif
-
- /* Turn off the dcache */
- p0.l = LO(DMEM_CONTROL);
- p0.h = HI(DMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENDCPLB;
- R0 = R0 & R1;
-
- /* Anomaly 05000125 */
-#if ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
- [p0] = R0;
- SSYNC;
-#if ANOMALY_05000125
- STI R2;
-#endif
-
- /* Initialise UART - when booting from u-boot, the UART is not disabled
- * so if we dont initalize here, our serial console gets hosed */
- p0.h = hi(BFIN_UART_LCR);
- p0.l = lo(BFIN_UART_LCR);
- r0 = 0x0(Z);
- w[p0] = r0.L; /* To enable DLL writes */
- ssync;
-
- p0.h = hi(BFIN_UART_DLL);
- p0.l = lo(BFIN_UART_DLL);
- r0 = 0x0(Z);
- w[p0] = r0.L;
- ssync;
-
- p0.h = hi(BFIN_UART_DLH);
- p0.l = lo(BFIN_UART_DLH);
- r0 = 0x00(Z);
- w[p0] = r0.L;
- ssync;
-
- p0.h = hi(BFIN_UART_GCTL);
- p0.l = lo(BFIN_UART_GCTL);
- r0 = 0x0(Z);
- w[p0] = r0.L; /* To enable UART clock */
- ssync;
-
- /* Initialize stack pointer */
- sp.l = lo(INITIAL_STACK);
- sp.h = hi(INITIAL_STACK);
- fp = sp;
- usp = sp;
-
-#ifdef CONFIG_EARLY_PRINTK
- SP += -12;
- call _init_early_exception_vectors;
- SP += 12;
-#endif
-
- /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
- call _bf53x_relocate_l1_mem;
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
- call _start_dma_code;
-#endif
-
- /* Code for initializing Async memory banks */
-
- p2.h = hi(EBIU_AMBCTL1);
- p2.l = lo(EBIU_AMBCTL1);
- r0.h = hi(AMBCTL1VAL);
- r0.l = lo(AMBCTL1VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMBCTL0);
- p2.l = lo(EBIU_AMBCTL0);
- r0.h = hi(AMBCTL0VAL);
- r0.l = lo(AMBCTL0VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMGCTL);
- p2.l = lo(EBIU_AMGCTL);
- r0 = AMGCTLVAL;
- w[p2] = r0;
- ssync;
-
- /* This section keeps the processor in supervisor mode
- * during kernel boot. Switches to user mode at end of boot.
- * See page 3-9 of Hardware Reference manual for documentation.
- */
-
- /* EVT15 = _real_start */
-
- p0.l = lo(EVT15);
- p0.h = hi(EVT15);
- p1.l = _real_start;
- p1.h = _real_start;
- [p0] = p1;
- csync;
-
- p0.l = lo(IMASK);
- p0.h = hi(IMASK);
- p1.l = IMASK_IVG15;
- p1.h = 0x0;
- [p0] = p1;
- csync;
-
- raise 15;
- p0.l = .LWAIT_HERE;
- p0.h = .LWAIT_HERE;
- reti = p0;
-#if ANOMALY_05000281
- nop; nop; nop;
-#endif
- rti;
-
-.LWAIT_HERE:
- jump .LWAIT_HERE;
-ENDPROC(__start)
-
-ENTRY(_real_start)
- [ -- sp ] = reti;
- p0.l = lo(WDOG_CTL);
- p0.h = hi(WDOG_CTL);
- r0 = 0xAD6(z);
- w[p0] = r0; /* watchdog off for now */
- ssync;
-
- /* Code update for BSS size == 0
- * Zero out the bss region.
- */
-
- p1.l = ___bss_start;
- p1.h = ___bss_start;
- p2.l = ___bss_stop;
- p2.h = ___bss_stop;
- r0 = 0;
- p2 -= p1;
- lsetup (.L_clear_bss, .L_clear_bss) lc0 = p2;
-.L_clear_bss:
- B[p1++] = r0;
-
- /* In case there is a NULL pointer reference
- * Zero out region before stext
- */
-
- p1.l = 0x0;
- p1.h = 0x0;
- r0.l = __stext;
- r0.h = __stext;
- r0 = r0 >> 1;
- p2 = r0;
- r0 = 0;
- lsetup (.L_clear_zero, .L_clear_zero) lc0 = p2;
-.L_clear_zero:
- W[p1++] = r0;
-
- /* pass the uboot arguments to the global value command line */
- R0 = R7;
- call _cmdline_init;
-
- p1.l = __rambase;
- p1.h = __rambase;
- r0.l = __sdata;
- r0.h = __sdata;
- [p1] = r0;
-
- p1.l = __ramstart;
- p1.h = __ramstart;
- p3.l = ___bss_stop;
- p3.h = ___bss_stop;
-
- r1 = p3;
- [p1] = r1;
-
- /*
- * load the current thread pointer and stack
- */
- r1.l = _init_thread_union;
- r1.h = _init_thread_union;
-
- r2.l = 0x2000;
- r2.h = 0x0000;
- r1 = r1 + r2;
- sp = r1;
- usp = sp;
- fp = sp;
- jump.l _start_kernel;
-ENDPROC(_real_start)
-
-__FINIT
-
.section .l1.text
#ifdef CONFIG_BFIN_KERNEL_CLOCK
ENTRY(_start_dma_code)
@@ -412,13 +129,6 @@ ENTRY(_start_dma_code)
[P2] = R1;
SSYNC;
- p0.h = hi(SIC_IWR);
- p0.l = lo(SIC_IWR);
- r0.l = lo(IWR_ENABLE_ALL);
- r0.h = hi(IWR_ENABLE_ALL);
- [p0] = r0;
- SSYNC;
-
RTS;
ENDPROC(_start_dma_code)
#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf533/ints-priority.c b/arch/blackfin/mach-bf533/ints-priority.c
index 7d79e0f9503d..f51994b7a2b9 100644
--- a/arch/blackfin/mach-bf533/ints-priority.c
+++ b/arch/blackfin/mach-bf533/ints-priority.c
@@ -31,7 +31,7 @@
#include <linux/irq.h>
#include <asm/blackfin.h>
-void program_IAR(void)
+void __init program_IAR(void)
{
/* Program the IAR0 Register with the configured priority */
bfin_write_SIC_IAR0(((CONFIG_PLLWAKE_ERROR - 7) << PLLWAKE_ERROR_POS) |
diff --git a/arch/blackfin/mach-bf537/boards/Kconfig b/arch/blackfin/mach-bf537/boards/Kconfig
index 7e789dbef036..42a57b0acb29 100644
--- a/arch/blackfin/mach-bf537/boards/Kconfig
+++ b/arch/blackfin/mach-bf537/boards/Kconfig
@@ -15,6 +15,12 @@ config BFIN537_BLUETECHNIX_CM
help
CM-BF537 support for EVAL- and DEV-Board.
+config BFIN537_BLUETECHNIX_TCM
+ bool "Bluetechnix TCM-BF537"
+ depends on (BF537)
+ help
+ TCM-BF537 support for EVAL- and DEV-Board.
+
config PNAV10
bool "PNAV board"
depends on (BF537)
diff --git a/arch/blackfin/mach-bf537/boards/Makefile b/arch/blackfin/mach-bf537/boards/Makefile
index c94f7a5b8211..7168cc14afd8 100644
--- a/arch/blackfin/mach-bf537/boards/Makefile
+++ b/arch/blackfin/mach-bf537/boards/Makefile
@@ -5,5 +5,6 @@
obj-$(CONFIG_GENERIC_BF537_BOARD) += generic_board.o
obj-$(CONFIG_BFIN537_STAMP) += stamp.o
obj-$(CONFIG_BFIN537_BLUETECHNIX_CM) += cm_bf537.o
+obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM) += tcm_bf537.o
obj-$(CONFIG_PNAV10) += pnav10.o
obj-$(CONFIG_CAMSIG_MINOTAUR) += minotaur.o
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c
index 73f2142875e2..dde14720b0ea 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -33,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
@@ -56,16 +57,16 @@ const char bfin_board_name[] = "Bluetechnix CM BF537";
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0xe0000,
.offset = 0x20000
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = 0x700000,
.offset = 0x00100000,
}
@@ -307,6 +308,55 @@ static struct platform_device net2272_bfin_device = {
};
#endif
+#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
+static struct mtd_partition cm_partitions[] = {
+ {
+ .name = "bootloader(nor)",
+ .size = 0x40000,
+ .offset = 0,
+ }, {
+ .name = "linux kernel(nor)",
+ .size = 0xE0000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "file system(nor)",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static struct physmap_flash_data cm_flash_data = {
+ .width = 2,
+ .parts = cm_partitions,
+ .nr_parts = ARRAY_SIZE(cm_partitions),
+};
+
+static unsigned cm_flash_gpios[] = { GPIO_PF4 };
+
+static struct resource cm_flash_resource[] = {
+ {
+ .name = "cfi_probe",
+ .start = 0x20000000,
+ .end = 0x201fffff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = (unsigned long)cm_flash_gpios,
+ .end = ARRAY_SIZE(cm_flash_gpios),
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device cm_flash_device = {
+ .name = "gpio-addr-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &cm_flash_data,
+ },
+ .num_resources = ARRAY_SIZE(cm_flash_resource),
+ .resource = cm_flash_resource,
+};
+#endif
+
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
static struct resource bfin_uart_resources[] = {
{
@@ -395,7 +445,7 @@ static struct platform_device bfin_mac_device = {
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 64
+#define PATA_INT IRQ_PF14
static struct pata_platform_info bfin_pata_platform_data = {
.ioport_shift = 2,
@@ -510,6 +560,10 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
&bfin_pata_device,
#endif
+
+#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
+ &cm_flash_device,
+#endif
};
static int __init cm_bf537_init(void)
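One detail of the new cm_flash_resource[] above is easy to miss: the second entry reuses an IORESOURCE_IRQ slot as a carrier, storing the cm_flash_gpios[] pointer in .start and the element count in .end. A hedged sketch of how the "gpio-addr-flash" driver would presumably unpack that pair in its probe path (function and variable names are assumptions):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/platform_device.h>

static int gpio_flash_get_gpios(struct platform_device *pdev,
				unsigned **pins, size_t *npins)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (!res)
		return -ENODEV;
	*pins  = (unsigned *)(unsigned long)res->start;	/* cm_flash_gpios[] */
	*npins = res->end;				/* ARRAY_SIZE(cm_flash_gpios) */
	return 0;
}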
diff --git a/arch/blackfin/mach-bf537/boards/generic_board.c b/arch/blackfin/mach-bf537/boards/generic_board.c
index 01b63e2ec18f..78a13d5bfd55 100644
--- a/arch/blackfin/mach-bf537/boards/generic_board.c
+++ b/arch/blackfin/mach-bf537/boards/generic_board.c
@@ -38,7 +38,6 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/sl811.h>
@@ -307,16 +306,16 @@ static struct platform_device net2272_bfin_device = {
|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0xe0000,
.offset = 0x20000
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = 0x700000,
.offset = 0x00100000,
}
@@ -619,43 +618,6 @@ static struct platform_device bfin_sport1_uart_device = {
};
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 55
-
-static struct pata_platform_info bfin_pata_platform_data = {
- .ioport_shift = 1,
- .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
-};
-
-static struct resource bfin_pata_resources[] = {
- {
- .start = 0x20314020,
- .end = 0x2031403F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x2031401C,
- .end = 0x2031401F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = PATA_INT,
- .end = PATA_INT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device bfin_pata_device = {
- .name = "pata_platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(bfin_pata_resources),
- .resource = bfin_pata_resources,
- .dev = {
- .platform_data = &bfin_pata_platform_data,
- }
-};
-#endif
-
static struct platform_device *stamp_devices[] __initdata = {
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
&bfin_pcmcia_cf_device,
@@ -717,10 +679,6 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_sport0_uart_device,
&bfin_sport1_uart_device,
#endif
-
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- &bfin_pata_device,
-#endif
};
static int __init stamp_init(void)
@@ -732,9 +690,6 @@ static int __init stamp_init(void)
ARRAY_SIZE(bfin_spi_board_info));
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
-#endif
return 0;
}
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 18ddf7a52005..48c4cd2d1be6 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -100,16 +100,16 @@ static struct platform_device net2272_bfin_device = {
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "uboot",
+ .name = "bootloader(spi)",
.size = PSIZE_UBOOT,
.offset = 0x000000,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "initramfs",
+ .name = "initramfs(spi)",
.size = PSIZE_INITRAMFS,
.offset = PSIZE_UBOOT
}, {
- .name = "opt",
+ .name = "opt(spi)",
.size = FLASH_SIZE - (PSIZE_UBOOT + PSIZE_INITRAMFS),
.offset = PSIZE_UBOOT + PSIZE_INITRAMFS,
}
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 51c3bab14a69..f9174c11cbd4 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -231,16 +231,16 @@ static struct platform_device net2272_bfin_device = {
|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0xe0000,
.offset = 0x20000
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = 0x700000,
.offset = 0x00100000,
}
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 6dbc76fb080b..e93964fdb432 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -364,11 +364,11 @@ const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static struct mtd_partition bfin_plat_nand_partitions[] = {
{
- .name = "linux kernel",
+ .name = "linux kernel(nand)",
.size = 0x400000,
.offset = 0,
}, {
- .name = "file system",
+ .name = "file system(nand)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
},
@@ -439,19 +439,19 @@ static void bfin_plat_nand_init(void) {}
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition stamp_partitions[] = {
{
- .name = "Bootloader",
+ .name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
- .name = "Kernel",
+ .name = "linux kernel(nor)",
.size = 0xE0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "RootFS",
+ .name = "file system(nor)",
.size = 0x400000 - 0x40000 - 0xE0000 - 0x10000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "MAC Address",
+ .name = "MAC Address(nor)",
.size = MTDPART_SIZ_FULL,
.offset = 0x3F0000,
.mask_flags = MTD_WRITEABLE,
@@ -485,16 +485,16 @@ static struct platform_device stamp_flash_device = {
|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0xe0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
new file mode 100644
index 000000000000..d5ff705a5129
--- /dev/null
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -0,0 +1,590 @@
+/*
+ * File: arch/blackfin/mach-bf537/boards/tcm_bf537.c
+ * Based on: arch/blackfin/mach-bf537/boards/cm_bf537.c
+ * Author: Aidan Williams <aidan@nicta.com.au>
+ *
+ * Created: 2005
+ * Description: Board description file
+ *
+ * Modified:
+ * Copyright 2005 National ICT Australia (NICTA)
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+#include <linux/usb/isp1362.h>
+#endif
+#include <linux/ata_platform.h>
+#include <linux/irq.h>
+#include <asm/dma.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/portmux.h>
+#include <asm/dpmc.h>
+
+/*
+ * Name the Board for the /proc/cpuinfo
+ */
+const char bfin_board_name[] = "Bluetechnix TCM BF537";
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+/* all SPI peripherals info goes here */
+
+#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+static struct mtd_partition bfin_spi_flash_partitions[] = {
+ {
+ .name = "bootloader(spi)",
+ .size = 0x00020000,
+ .offset = 0,
+ .mask_flags = MTD_CAP_ROM
+ }, {
+ .name = "linux kernel(spi)",
+ .size = 0xe0000,
+ .offset = 0x20000
+ }, {
+ .name = "file system(spi)",
+ .size = 0x700000,
+ .offset = 0x00100000,
+ }
+};
+
+static struct flash_platform_data bfin_spi_flash_data = {
+ .name = "m25p80",
+ .parts = bfin_spi_flash_partitions,
+ .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
+ .type = "m25p64",
+};
+
+/* SPI flash chip (m25p64) */
+static struct bfin5xx_spi_chip spi_flash_chip_info = {
+ .enable_dma = 0, /* don't use dma transfer with this chip */
+ .bits_per_word = 8,
+};
+#endif
+
+#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE)
+/* SPI ADC chip */
+static struct bfin5xx_spi_chip spi_adc_chip_info = {
+ .enable_dma = 1, /* use dma transfer with this chip */
+ .bits_per_word = 16,
+};
+#endif
+
+#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE)
+static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
+ .enable_dma = 0,
+ .bits_per_word = 16,
+};
+#endif
+
+#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE)
+static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
+ .enable_dma = 0,
+ .bits_per_word = 16,
+};
+#endif
+
+#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+static struct bfin5xx_spi_chip spi_mmc_chip_info = {
+ .enable_dma = 1,
+ .bits_per_word = 8,
+};
+#endif
+
+static struct spi_board_info bfin_spi_board_info[] __initdata = {
+#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
+ {
+ /* the modalias must be the same as the spi device driver name */
+ .modalias = "m25p80", /* Name of spi_driver for this device */
+ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0, /* Framework bus number */
+ .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1 */
+ .platform_data = &bfin_spi_flash_data,
+ .controller_data = &spi_flash_chip_info,
+ .mode = SPI_MODE_3,
+ },
+#endif
+
+#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE)
+ {
+ .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
+ .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0, /* Framework bus number */
+ .chip_select = 1, /* Framework chip select. */
+ .platform_data = NULL, /* No spi_driver specific config */
+ .controller_data = &spi_adc_chip_info,
+ },
+#endif
+
+#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE)
+ {
+ .modalias = "ad1836-spi",
+ .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT,
+ .controller_data = &ad1836_spi_chip_info,
+ },
+#endif
+
+#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE)
+ {
+ .modalias = "ad9960-spi",
+ .max_speed_hz = 10000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = 1,
+ .controller_data = &ad9960_spi_chip_info,
+ },
+#endif
+
+#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+ {
+ .modalias = "spi_mmc_dummy",
+ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = 7,
+ .platform_data = NULL,
+ .controller_data = &spi_mmc_chip_info,
+ .mode = SPI_MODE_3,
+ },
+ {
+ .modalias = "spi_mmc",
+ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = CONFIG_SPI_MMC_CS_CHAN,
+ .platform_data = NULL,
+ .controller_data = &spi_mmc_chip_info,
+ .mode = SPI_MODE_3,
+ },
+#endif
+};
+
+/* SPI (0) */
+static struct resource bfin_spi0_resource[] = {
+ [0] = {
+ .start = SPI0_REGBASE,
+ .end = SPI0_REGBASE + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = CH_SPI,
+ .end = CH_SPI,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+/* SPI controller data */
+static struct bfin5xx_spi_master bfin_spi0_info = {
+ .num_chipselect = 8,
+ .enable_dma = 1, /* master has the ability to do dma transfer */
+ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct platform_device bfin_spi0_device = {
+ .name = "bfin-spi",
+ .id = 0, /* Bus number */
+ .num_resources = ARRAY_SIZE(bfin_spi0_resource),
+ .resource = bfin_spi0_resource,
+ .dev = {
+ .platform_data = &bfin_spi0_info, /* Passed to driver */
+ },
+};
+#endif /* spi master and devices */
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+static struct platform_device rtc_device = {
+ .name = "rtc-bfin",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
+static struct platform_device hitachi_fb_device = {
+ .name = "hitachi-tx09",
+};
+#endif
+
+#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+static struct resource smc91x_resources[] = {
+ {
+ .start = 0x20200300,
+ .end = 0x20200300 + 16,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_PF14,
+ .end = IRQ_PF14,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+#endif
+
+#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+static struct resource isp1362_hcd_resources[] = {
+ {
+ .start = 0x20308000,
+ .end = 0x20308000,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = 0x20308004,
+ .end = 0x20308004,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_PG15,
+ .end = IRQ_PG15,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+ },
+};
+
+static struct isp1362_platform_data isp1362_priv = {
+ .sel15Kres = 1,
+ .clknotstop = 0,
+ .oc_enable = 0,
+ .int_act_high = 0,
+ .int_edge_triggered = 0,
+ .remote_wakeup_connected = 0,
+ .no_power_switching = 1,
+ .power_switching_mode = 0,
+};
+
+static struct platform_device isp1362_hcd_device = {
+ .name = "isp1362-hcd",
+ .id = 0,
+ .dev = {
+ .platform_data = &isp1362_priv,
+ },
+ .num_resources = ARRAY_SIZE(isp1362_hcd_resources),
+ .resource = isp1362_hcd_resources,
+};
+#endif
+
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+static struct resource net2272_bfin_resources[] = {
+ {
+ .start = 0x20200000,
+ .end = 0x20200000 + 0x100,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_PH14,
+ .end = IRQ_PH14,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+ },
+};
+
+static struct platform_device net2272_bfin_device = {
+ .name = "net2272",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(net2272_bfin_resources),
+ .resource = net2272_bfin_resources,
+};
+#endif
+
+#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
+static struct mtd_partition cm_partitions[] = {
+ {
+ .name = "bootloader(nor)",
+ .size = 0x40000,
+ .offset = 0,
+ }, {
+ .name = "linux kernel(nor)",
+ .size = 0xE0000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "file system(nor)",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static struct physmap_flash_data cm_flash_data = {
+ .width = 2,
+ .parts = cm_partitions,
+ .nr_parts = ARRAY_SIZE(cm_partitions),
+};
+
+static unsigned cm_flash_gpios[] = { GPIO_PF4, GPIO_PF5 };
+
+static struct resource cm_flash_resource[] = {
+ {
+ .name = "cfi_probe",
+ .start = 0x20000000,
+ .end = 0x201fffff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = (unsigned long)cm_flash_gpios,
+ .end = ARRAY_SIZE(cm_flash_gpios),
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device cm_flash_device = {
+ .name = "gpio-addr-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &cm_flash_data,
+ },
+ .num_resources = ARRAY_SIZE(cm_flash_resource),
+ .resource = cm_flash_resource,
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+static struct resource bfin_uart_resources[] = {
+ {
+ .start = 0xFFC00400,
+ .end = 0xFFC004FF,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = 0xFFC02000,
+ .end = 0xFFC020FF,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device bfin_uart_device = {
+ .name = "bfin-uart",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(bfin_uart_resources),
+ .resource = bfin_uart_resources,
+};
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+static struct resource bfin_sir_resources[] = {
+#ifdef CONFIG_BFIN_SIR0
+ {
+ .start = 0xFFC00400,
+ .end = 0xFFC004FF,
+ .flags = IORESOURCE_MEM,
+ },
+#endif
+#ifdef CONFIG_BFIN_SIR1
+ {
+ .start = 0xFFC02000,
+ .end = 0xFFC020FF,
+ .flags = IORESOURCE_MEM,
+ },
+#endif
+};
+
+static struct platform_device bfin_sir_device = {
+ .name = "bfin_sir",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bfin_sir_resources),
+ .resource = bfin_sir_resources,
+};
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static struct resource bfin_twi0_resource[] = {
+ [0] = {
+ .start = TWI0_REGBASE,
+ .end = TWI0_REGBASE,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_TWI,
+ .end = IRQ_TWI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device i2c_bfin_twi_device = {
+ .name = "i2c-bfin-twi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bfin_twi0_resource),
+ .resource = bfin_twi0_resource,
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+static struct platform_device bfin_sport0_uart_device = {
+ .name = "bfin-sport-uart",
+ .id = 0,
+};
+
+static struct platform_device bfin_sport1_uart_device = {
+ .name = "bfin-sport-uart",
+ .id = 1,
+};
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+static struct platform_device bfin_mac_device = {
+ .name = "bfin_mac",
+};
+#endif
+
+#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
+#define PATA_INT IRQ_PF14
+
+static struct pata_platform_info bfin_pata_platform_data = {
+ .ioport_shift = 2,
+ .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
+};
+
+static struct resource bfin_pata_resources[] = {
+ {
+ .start = 0x2030C000,
+ .end = 0x2030C01F,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x2030D018,
+ .end = 0x2030D01B,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = PATA_INT,
+ .end = PATA_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device bfin_pata_device = {
+ .name = "pata_platform",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(bfin_pata_resources),
+ .resource = bfin_pata_resources,
+ .dev = {
+ .platform_data = &bfin_pata_platform_data,
+ }
+};
+#endif
+
+static const unsigned int cclk_vlev_datasheet[] =
+{
+ VRPAIR(VLEV_085, 250000000),
+ VRPAIR(VLEV_090, 376000000),
+ VRPAIR(VLEV_095, 426000000),
+ VRPAIR(VLEV_100, 426000000),
+ VRPAIR(VLEV_105, 476000000),
+ VRPAIR(VLEV_110, 476000000),
+ VRPAIR(VLEV_115, 476000000),
+ VRPAIR(VLEV_120, 500000000),
+ VRPAIR(VLEV_125, 533000000),
+ VRPAIR(VLEV_130, 600000000),
+};
+
+static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
+ .tuple_tab = cclk_vlev_datasheet,
+ .tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
+ .vr_settling_time = 25 /* us */,
+};
+
+static struct platform_device bfin_dpmc = {
+ .name = "bfin dpmc",
+ .dev = {
+ .platform_data = &bfin_dmpc_vreg_data,
+ },
+};
+
+static struct platform_device *cm_bf537_devices[] __initdata = {
+
+ &bfin_dpmc,
+
+#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
+ &hitachi_fb_device,
+#endif
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+ &rtc_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+ &bfin_uart_device,
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+ &bfin_sir_device,
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+ &i2c_bfin_twi_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+ &bfin_sport0_uart_device,
+ &bfin_sport1_uart_device,
+#endif
+
+#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
+ &isp1362_hcd_device,
+#endif
+
+#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+ &smc91x_device,
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+ &bfin_mac_device,
+#endif
+
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ &net2272_bfin_device,
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+ &bfin_spi0_device,
+#endif
+
+#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
+ &bfin_pata_device,
+#endif
+
+#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
+ &cm_flash_device,
+#endif
+};
+
+static int __init cm_bf537_init(void)
+{
+ printk(KERN_INFO "%s(): registering device resources\n", __func__);
+ platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices));
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+ spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+#endif
+
+#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
+ irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
+#endif
+ return 0;
+}
+
+arch_initcall(cm_bf537_init);
+
+void bfin_get_ether_addr(char *addr)
+{
+ random_ether_addr(addr);
+ printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__);
+}
+EXPORT_SYMBOL(bfin_get_ether_addr);
diff --git a/arch/blackfin/mach-bf537/head.S b/arch/blackfin/mach-bf537/head.S
index 6b019eaee0b6..64e0287ab266 100644
--- a/arch/blackfin/mach-bf537/head.S
+++ b/arch/blackfin/mach-bf537/head.S
@@ -30,325 +30,11 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
-#include <asm/trace.h>
-
#ifdef CONFIG_BFIN_KERNEL_CLOCK
#include <asm/mach-common/clocks.h>
#include <asm/mach/mem_init.h>
#endif
-.extern ___bss_stop
-.extern ___bss_start
-.extern _bf53x_relocate_l1_mem
-
-#define INITIAL_STACK 0xFFB01000
-
-__INIT
-
-ENTRY(__start)
- /* R0: argument of command line string, passed from uboot, save it */
- R7 = R0;
- /* Enable Cycle Counter and Nesting Of Interrupts */
-#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
- R0 = SYSCFG_SNEN;
-#else
- R0 = SYSCFG_SNEN | SYSCFG_CCEN;
-#endif
- SYSCFG = R0;
- R0 = 0;
-
- /* Clear Out All the data and pointer Registers */
- R1 = R0;
- R2 = R0;
- R3 = R0;
- R4 = R0;
- R5 = R0;
- R6 = R0;
-
- P0 = R0;
- P1 = R0;
- P2 = R0;
- P3 = R0;
- P4 = R0;
- P5 = R0;
-
- LC0 = r0;
- LC1 = r0;
- L0 = r0;
- L1 = r0;
- L2 = r0;
- L3 = r0;
-
- /* Clear Out All the DAG Registers */
- B0 = r0;
- B1 = r0;
- B2 = r0;
- B3 = r0;
-
- I0 = r0;
- I1 = r0;
- I2 = r0;
- I3 = r0;
-
- M0 = r0;
- M1 = r0;
- M2 = r0;
- M3 = r0;
-
- trace_buffer_init(p0,r0);
- P0 = R1;
- R0 = R1;
-
- /* Turn off the icache */
- p0.l = LO(IMEM_CONTROL);
- p0.h = HI(IMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENICPLB;
- R0 = R0 & R1;
-
- /* Anomaly 05000125 */
-#if ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
- [p0] = R0;
- SSYNC;
-#if ANOMALY_05000125
- STI R2;
-#endif
-
- /* Turn off the dcache */
- p0.l = LO(DMEM_CONTROL);
- p0.h = HI(DMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENDCPLB;
- R0 = R0 & R1;
-
- /* Anomaly 05000125 */
-#if ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
- [p0] = R0;
- SSYNC;
-#if ANOMALY_05000125
- STI R2;
-#endif
-
- /* Initialise General-Purpose I/O Modules on BF537 */
- /* Rev 0.0 Anomaly 05000212 - PORTx_FER,
- * PORT_MUX Registers Do Not accept "writes" correctly:
- */
- p0.h = hi(BFIN_PORT_MUX);
- p0.l = lo(BFIN_PORT_MUX);
-#if ANOMALY_05000212
- R0.L = W[P0]; /* Read */
- SSYNC;
-#endif
- R0 = (PGDE_UART | PFTE_UART)(Z);
-#if ANOMALY_05000212
- W[P0] = R0.L; /* Write */
- SSYNC;
-#endif
- W[P0] = R0.L; /* Enable both UARTS */
- SSYNC;
-
- p0.h = hi(PORTF_FER);
- p0.l = lo(PORTF_FER);
-#if ANOMALY_05000212
- R0.L = W[P0]; /* Read */
- SSYNC;
-#endif
- R0 = 0x000F(Z);
-#if ANOMALY_05000212
- W[P0] = R0.L; /* Write */
- SSYNC;
-#endif
- /* Enable peripheral function of PORTF for UART0 and UART1 */
- W[P0] = R0.L;
- SSYNC;
-
-#if !defined(CONFIG_BF534)
- p0.h = hi(EMAC_SYSTAT);
- p0.l = lo(EMAC_SYSTAT);
- R0.h = 0xFFFF; /* Clear EMAC Interrupt Status bits */
- R0.l = 0xFFFF;
- [P0] = R0;
- SSYNC;
-#endif
-
- /* Initialise UART - when booting from u-boot, the UART is not disabled
- * so if we dont initalize here, our serial console gets hosed */
- p0.h = hi(BFIN_UART_LCR);
- p0.l = lo(BFIN_UART_LCR);
- r0 = 0x0(Z);
- w[p0] = r0.L; /* To enable DLL writes */
- ssync;
-
- p0.h = hi(BFIN_UART_DLL);
- p0.l = lo(BFIN_UART_DLL);
- r0 = 0x0(Z);
- w[p0] = r0.L;
- ssync;
-
- p0.h = hi(BFIN_UART_DLH);
- p0.l = lo(BFIN_UART_DLH);
- r0 = 0x00(Z);
- w[p0] = r0.L;
- ssync;
-
- p0.h = hi(BFIN_UART_GCTL);
- p0.l = lo(BFIN_UART_GCTL);
- r0 = 0x0(Z);
- w[p0] = r0.L; /* To enable UART clock */
- ssync;
-
- /* Initialize stack pointer */
- sp.l = lo(INITIAL_STACK);
- sp.h = hi(INITIAL_STACK);
- fp = sp;
- usp = sp;
-
-#ifdef CONFIG_EARLY_PRINTK
- SP += -12;
- call _init_early_exception_vectors;
- SP += 12;
-#endif
-
- /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
- call _bf53x_relocate_l1_mem;
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
- call _start_dma_code;
-#endif
-
- /* Code for initializing Async memory banks */
-
- p2.h = hi(EBIU_AMBCTL1);
- p2.l = lo(EBIU_AMBCTL1);
- r0.h = hi(AMBCTL1VAL);
- r0.l = lo(AMBCTL1VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMBCTL0);
- p2.l = lo(EBIU_AMBCTL0);
- r0.h = hi(AMBCTL0VAL);
- r0.l = lo(AMBCTL0VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMGCTL);
- p2.l = lo(EBIU_AMGCTL);
- r0 = AMGCTLVAL;
- w[p2] = r0;
- ssync;
-
- /* This section keeps the processor in supervisor mode
- * during kernel boot. Switches to user mode at end of boot.
- * See page 3-9 of Hardware Reference manual for documentation.
- */
-
- /* EVT15 = _real_start */
-
- p0.l = lo(EVT15);
- p0.h = hi(EVT15);
- p1.l = _real_start;
- p1.h = _real_start;
- [p0] = p1;
- csync;
-
- p0.l = lo(IMASK);
- p0.h = hi(IMASK);
- p1.l = IMASK_IVG15;
- p1.h = 0x0;
- [p0] = p1;
- csync;
-
- raise 15;
- p0.l = .LWAIT_HERE;
- p0.h = .LWAIT_HERE;
- reti = p0;
-#if ANOMALY_05000281
- nop; nop; nop;
-#endif
- rti;
-
-.LWAIT_HERE:
- jump .LWAIT_HERE;
-ENDPROC(__start)
-
-ENTRY(_real_start)
- [ -- sp ] = reti;
- p0.l = lo(WDOG_CTL);
- p0.h = hi(WDOG_CTL);
- r0 = 0xAD6(z);
- w[p0] = r0; /* watchdog off for now */
- ssync;
-
- /* Code update for BSS size == 0
- * Zero out the bss region.
- */
-
- p1.l = ___bss_start;
- p1.h = ___bss_start;
- p2.l = ___bss_stop;
- p2.h = ___bss_stop;
- r0 = 0;
- p2 -= p1;
- lsetup (.L_clear_bss, .L_clear_bss) lc0 = p2;
-.L_clear_bss:
- B[p1++] = r0;
-
- /* In case there is a NULL pointer reference
- * Zero out region before stext
- */
-
- p1.l = 0x0;
- p1.h = 0x0;
- r0.l = __stext;
- r0.h = __stext;
- r0 = r0 >> 1;
- p2 = r0;
- r0 = 0;
- lsetup (.L_clear_zero, .L_clear_zero) lc0 = p2;
-.L_clear_zero:
- W[p1++] = r0;
-
- /* pass the uboot arguments to the global value command line */
- R0 = R7;
- call _cmdline_init;
-
- p1.l = __rambase;
- p1.h = __rambase;
- r0.l = __sdata;
- r0.h = __sdata;
- [p1] = r0;
-
- p1.l = __ramstart;
- p1.h = __ramstart;
- p3.l = ___bss_stop;
- p3.h = ___bss_stop;
-
- r1 = p3;
- [p1] = r1;
-
- /*
- * load the current thread pointer and stack
- */
- r1.l = _init_thread_union;
- r1.h = _init_thread_union;
-
- r2.l = 0x2000;
- r2.h = 0x0000;
- r1 = r1 + r2;
- sp = r1;
- usp = sp;
- fp = sp;
- jump.l _start_kernel;
-ENDPROC(_real_start)
-
-__FINIT
-
.section .l1.text
#ifdef CONFIG_BFIN_KERNEL_CLOCK
ENTRY(_start_dma_code)
@@ -452,13 +138,6 @@ ENTRY(_start_dma_code)
[P2] = R1;
SSYNC;
- p0.h = hi(SIC_IWR);
- p0.l = lo(SIC_IWR);
- r0.l = lo(IWR_ENABLE_ALL);
- r0.h = hi(IWR_ENABLE_ALL);
- [p0] = r0;
- SSYNC;
-
RTS;
ENDPROC(_start_dma_code)
#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c
index a8b915f202ec..b1300b3f1812 100644
--- a/arch/blackfin/mach-bf537/ints-priority.c
+++ b/arch/blackfin/mach-bf537/ints-priority.c
@@ -31,7 +31,7 @@
#include <linux/irq.h>
#include <asm/blackfin.h>
-void program_IAR(void)
+void __init program_IAR(void)
{
/* Program the IAR0 Register with the configured priority */
bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) |
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 4f4ae8787edf..58abbed0a225 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -319,12 +319,12 @@ static struct platform_device bfin_atapi_device = {
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
static struct mtd_partition partition_info[] = {
{
- .name = "Linux Kernel",
+ .name = "linux kernel(nand)",
.offset = 0,
.size = 4 * SIZE_1M,
},
{
- .name = "File System",
+ .name = "file system(nand)",
.offset = 4 * SIZE_1M,
.size = (256 - 4) * SIZE_1M,
},
@@ -377,12 +377,12 @@ static struct platform_device bf54x_sdh_device = {
/* SPI flash chip (m25p16) */
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "linux kernel",
+ .name = "linux kernel(spi)",
.size = 0x1c0000,
.offset = 0x40000
}
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 166fa2201ee7..0d6333ada1d9 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -365,12 +365,12 @@ static struct platform_device bfin_atapi_device = {
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
static struct mtd_partition partition_info[] = {
{
- .name = "Linux Kernel",
+ .name = "linux kernel(nand)",
.offset = 0,
.size = 4 * SIZE_1M,
},
{
- .name = "File System",
+ .name = "file system(nand)",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
@@ -419,15 +419,15 @@ static struct platform_device bf54x_sdh_device = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition ezkit_partitions[] = {
{
- .name = "Bootloader",
+ .name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
- .name = "Kernel",
+ .name = "linux kernel(nor)",
.size = 0x1C0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "RootFS",
+ .name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -461,12 +461,12 @@ static struct platform_device ezkit_flash_device = {
/* SPI flash chip (m25p16) */
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "linux kernel",
+ .name = "linux kernel(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
diff --git a/arch/blackfin/mach-bf548/head.S b/arch/blackfin/mach-bf548/head.S
index 06b9178cfcfe..e3000f70a26e 100644
--- a/arch/blackfin/mach-bf548/head.S
+++ b/arch/blackfin/mach-bf548/head.S
@@ -30,263 +30,11 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
-#include <asm/trace.h>
#ifdef CONFIG_BFIN_KERNEL_CLOCK
#include <asm/mach-common/clocks.h>
#include <asm/mach/mem_init.h>
#endif
-.extern ___bss_stop
-.extern ___bss_start
-.extern _bf53x_relocate_l1_mem
-
-#define INITIAL_STACK 0xFFB01000
-
-__INIT
-
-ENTRY(__start)
- /* R0: argument of command line string, passed from uboot, save it */
- R7 = R0;
- /* Enable Cycle Counter and Nesting Of Interrupts */
-#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
- R0 = SYSCFG_SNEN;
-#else
- R0 = SYSCFG_SNEN | SYSCFG_CCEN;
-#endif
- SYSCFG = R0;
- R0 = 0;
-
- /* Clear Out All the data and pointer Registers*/
- R1 = R0;
- R2 = R0;
- R3 = R0;
- R4 = R0;
- R5 = R0;
- R6 = R0;
-
- P0 = R0;
- P1 = R0;
- P2 = R0;
- P3 = R0;
- P4 = R0;
- P5 = R0;
-
- LC0 = r0;
- LC1 = r0;
- L0 = r0;
- L1 = r0;
- L2 = r0;
- L3 = r0;
-
- /* Clear Out All the DAG Registers*/
- B0 = r0;
- B1 = r0;
- B2 = r0;
- B3 = r0;
-
- I0 = r0;
- I1 = r0;
- I2 = r0;
- I3 = r0;
-
- M0 = r0;
- M1 = r0;
- M2 = r0;
- M3 = r0;
-
- trace_buffer_init(p0,r0);
- P0 = R1;
- R0 = R1;
-
- /* Turn off the icache */
- p0.l = LO(IMEM_CONTROL);
- p0.h = HI(IMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENICPLB;
- R0 = R0 & R1;
- [p0] = R0;
- SSYNC;
-
- /* Turn off the dcache */
- p0.l = LO(DMEM_CONTROL);
- p0.h = HI(DMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENDCPLB;
- R0 = R0 & R1;
- [p0] = R0;
- SSYNC;
-
- /* Initialize stack pointer */
- SP.L = LO(INITIAL_STACK);
- SP.H = HI(INITIAL_STACK);
- FP = SP;
- USP = SP;
-
-#ifdef CONFIG_EARLY_PRINTK
- SP += -12;
- call _init_early_exception_vectors;
- SP += 12;
-#endif
-
- /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
- call _bf53x_relocate_l1_mem;
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
- call _start_dma_code;
-#endif
- /* Code for initializing Async memory banks */
-
- p2.h = hi(EBIU_AMBCTL1);
- p2.l = lo(EBIU_AMBCTL1);
- r0.h = hi(AMBCTL1VAL);
- r0.l = lo(AMBCTL1VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMBCTL0);
- p2.l = lo(EBIU_AMBCTL0);
- r0.h = hi(AMBCTL0VAL);
- r0.l = lo(AMBCTL0VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMGCTL);
- p2.l = lo(EBIU_AMGCTL);
- r0 = AMGCTLVAL;
- w[p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_MBSCTL);
- p2.l = lo(EBIU_MBSCTL);
- r0.h = hi(CONFIG_EBIU_MBSCTLVAL);
- r0.l = lo(CONFIG_EBIU_MBSCTLVAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_MODE);
- p2.l = lo(EBIU_MODE);
- r0.h = hi(CONFIG_EBIU_MODEVAL);
- r0.l = lo(CONFIG_EBIU_MODEVAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_FCTL);
- p2.l = lo(EBIU_FCTL);
- r0.h = hi(CONFIG_EBIU_FCTLVAL);
- r0.l = lo(CONFIG_EBIU_FCTLVAL);
- [p2] = r0;
- ssync;
-
- /* This section keeps the processor in supervisor mode
- * during kernel boot. Switches to user mode at end of boot.
- * See page 3-9 of Hardware Reference manual for documentation.
- */
-
- /* EVT15 = _real_start */
-
- p0.l = lo(EVT15);
- p0.h = hi(EVT15);
- p1.l = _real_start;
- p1.h = _real_start;
- [p0] = p1;
- csync;
-
- p0.l = lo(IMASK);
- p0.h = hi(IMASK);
- p1.l = IMASK_IVG15;
- p1.h = 0x0;
- [p0] = p1;
- csync;
-
- raise 15;
- p0.l = .LWAIT_HERE;
- p0.h = .LWAIT_HERE;
- reti = p0;
-#if ANOMALY_05000281
- nop;
- nop;
- nop;
-#endif
- rti;
-
-.LWAIT_HERE:
- jump .LWAIT_HERE;
-ENDPROC(__start)
-
-ENTRY(_real_start)
- [ -- sp ] = reti;
- p0.l = lo(WDOG_CTL);
- p0.h = hi(WDOG_CTL);
- r0 = 0xAD6(z);
- w[p0] = r0; /* watchdog off for now */
- ssync;
-
- /* Code update for BSS size == 0
- * Zero out the bss region.
- */
-
- p1.l = ___bss_start;
- p1.h = ___bss_start;
- p2.l = ___bss_stop;
- p2.h = ___bss_stop;
- r0 = 0;
- p2 -= p1;
- lsetup (.L_clear_bss, .L_clear_bss ) lc0 = p2;
-.L_clear_bss:
- B[p1++] = r0;
-
- /* In case there is a NULL pointer reference
- * Zero out region before stext
- */
-
- p1.l = 0x0;
- p1.h = 0x0;
- r0.l = __stext;
- r0.h = __stext;
- r0 = r0 >> 1;
- p2 = r0;
- r0 = 0;
- lsetup (.L_clear_zero, .L_clear_zero ) lc0 = p2;
-.L_clear_zero:
- W[p1++] = r0;
-
- /* pass the uboot arguments to the global value command line */
- R0 = R7;
- call _cmdline_init;
-
- p1.l = __rambase;
- p1.h = __rambase;
- r0.l = __sdata;
- r0.h = __sdata;
- [p1] = r0;
-
- p1.l = __ramstart;
- p1.h = __ramstart;
- p3.l = ___bss_stop;
- p3.h = ___bss_stop;
-
- r1 = p3;
- [p1] = r1;
-
-
- /*
- * load the current thread pointer and stack
- */
- r1.l = _init_thread_union;
- r1.h = _init_thread_union;
-
- r2.l = 0x2000;
- r2.h = 0x0000;
- r1 = r1 + r2;
- sp = r1;
- usp = sp;
- fp = sp;
- call _start_kernel;
-.L_exit:
- jump.s .L_exit;
-ENDPROC(_real_start)
-
-__FINIT
-
.section .l1.text
#ifdef CONFIG_BFIN_KERNEL_CLOCK
ENTRY(_start_dma_code)
@@ -443,13 +191,6 @@ ENTRY(_start_dma_code)
SSYNC;
#endif
- p0.h = hi(SIC_IWR0);
- p0.l = lo(SIC_IWR0);
- r0.l = lo(IWR_ENABLE_ALL);
- r0.h = hi(IWR_ENABLE_ALL);
- [p0] = r0;
- SSYNC;
-
RTS;
ENDPROC(_start_dma_code)
#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf548/ints-priority.c b/arch/blackfin/mach-bf548/ints-priority.c
index 2665653cee37..9dd0fa3ac4de 100644
--- a/arch/blackfin/mach-bf548/ints-priority.c
+++ b/arch/blackfin/mach-bf548/ints-priority.c
@@ -31,7 +31,7 @@
#include <linux/irq.h>
#include <asm/blackfin.h>
-void program_IAR(void)
+void __init program_IAR(void)
{
/* Program the IAR0 Register with the configured priority */
bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) |
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 466ef5929a25..8f40990eea2f 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -54,16 +54,16 @@ const char bfin_board_name[] = "Bluetechnix CM BF561";
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
- .name = "bootloader",
+ .name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
- .name = "kernel",
+ .name = "linux kernel(spi)",
.size = 0xe0000,
.offset = 0x20000
}, {
- .name = "file system",
+ .name = "file system(spi)",
.size = 0x700000,
.offset = 0x00100000,
}
@@ -306,7 +306,7 @@ static struct platform_device bfin_sir_device = {
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 119
+#define PATA_INT IRQ_PF46
static struct pata_platform_info bfin_pata_platform_data = {
.ioport_shift = 2,
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index bc6feded8569..50b4cdceccfe 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -35,7 +35,6 @@
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/ata_platform.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
@@ -243,15 +242,15 @@ static struct platform_device bfin_sir_device = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition ezkit_partitions[] = {
{
- .name = "Bootloader",
+ .name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
- .name = "Kernel",
+ .name = "linux kernel(nor)",
.size = 0x1C0000,
.offset = MTDPART_OFS_APPEND,
}, {
- .name = "RootFS",
+ .name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
@@ -350,43 +349,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-#define PATA_INT 55
-
-static struct pata_platform_info bfin_pata_platform_data = {
- .ioport_shift = 1,
- .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
-};
-
-static struct resource bfin_pata_resources[] = {
- {
- .start = 0x20314020,
- .end = 0x2031403F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x2031401C,
- .end = 0x2031401F,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = PATA_INT,
- .end = PATA_INT,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device bfin_pata_device = {
- .name = "pata_platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(bfin_pata_resources),
- .resource = bfin_pata_resources,
- .dev = {
- .platform_data = &bfin_pata_platform_data,
- }
-};
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>
@@ -499,10 +461,6 @@ static struct platform_device *ezkit_devices[] __initdata = {
&bfin_sir_device,
#endif
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- &bfin_pata_device,
-#endif
-
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
@@ -538,10 +496,6 @@ static int __init ezkit_init(void)
#endif
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
-
-#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
- irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
-#endif
return 0;
}
diff --git a/arch/blackfin/mach-bf561/head.S b/arch/blackfin/mach-bf561/head.S
index cf1a2dff01e7..c7a81e34703d 100644
--- a/arch/blackfin/mach-bf561/head.S
+++ b/arch/blackfin/mach-bf561/head.S
@@ -30,284 +30,13 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/blackfin.h>
-#include <asm/trace.h>
-
-#if CONFIG_BFIN_KERNEL_CLOCK
+#ifdef CONFIG_BFIN_KERNEL_CLOCK
#include <asm/mach-common/clocks.h>
#include <asm/mach/mem_init.h>
#endif
-.extern ___bss_stop
-.extern ___bss_start
-.extern _bf53x_relocate_l1_mem
-
-#define INITIAL_STACK 0xFFB01000
-
-__INIT
-
-ENTRY(__start)
- /* R0: argument of command line string, passed from uboot, save it */
- R7 = R0;
- /* Enable Cycle Counter and Nesting Of Interrupts */
-#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
- R0 = SYSCFG_SNEN;
-#else
- R0 = SYSCFG_SNEN | SYSCFG_CCEN;
-#endif
- SYSCFG = R0;
- R0 = 0;
-
- /* Clear Out All the data and pointer Registers */
- R1 = R0;
- R2 = R0;
- R3 = R0;
- R4 = R0;
- R5 = R0;
- R6 = R0;
-
- P0 = R0;
- P1 = R0;
- P2 = R0;
- P3 = R0;
- P4 = R0;
- P5 = R0;
-
- LC0 = r0;
- LC1 = r0;
- L0 = r0;
- L1 = r0;
- L2 = r0;
- L3 = r0;
-
- /* Clear Out All the DAG Registers */
- B0 = r0;
- B1 = r0;
- B2 = r0;
- B3 = r0;
-
- I0 = r0;
- I1 = r0;
- I2 = r0;
- I3 = r0;
-
- M0 = r0;
- M1 = r0;
- M2 = r0;
- M3 = r0;
-
- trace_buffer_init(p0,r0);
- P0 = R1;
- R0 = R1;
-
- /* Turn off the icache */
- p0.l = LO(IMEM_CONTROL);
- p0.h = HI(IMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENICPLB;
- R0 = R0 & R1;
-
-#if ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
- [p0] = R0;
- SSYNC;
-#if ANOMALY_05000125
- STI R2;
-#endif
-
- /* Turn off the dcache */
- p0.l = LO(DMEM_CONTROL);
- p0.h = HI(DMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENDCPLB;
- R0 = R0 & R1;
-
- /* Anomaly 05000125 */
-#if ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
- [p0] = R0;
- SSYNC;
-#if ANOMALY_05000125
- STI R2;
-#endif
-
- /* Initialise UART - when booting from u-boot, the UART is not disabled
- * so if we dont initalize here, our serial console gets hosed */
- p0.h = hi(BFIN_UART_LCR);
- p0.l = lo(BFIN_UART_LCR);
- r0 = 0x0(Z);
- w[p0] = r0.L; /* To enable DLL writes */
- ssync;
-
- p0.h = hi(BFIN_UART_DLL);
- p0.l = lo(BFIN_UART_DLL);
- r0 = 0x0(Z);
- w[p0] = r0.L;
- ssync;
-
- p0.h = hi(BFIN_UART_DLH);
- p0.l = lo(BFIN_UART_DLH);
- r0 = 0x00(Z);
- w[p0] = r0.L;
- ssync;
-
- p0.h = hi(BFIN_UART_GCTL);
- p0.l = lo(BFIN_UART_GCTL);
- r0 = 0x0(Z);
- w[p0] = r0.L; /* To enable UART clock */
- ssync;
-
- /* Initialize stack pointer */
- sp.l = lo(INITIAL_STACK);
- sp.h = hi(INITIAL_STACK);
- fp = sp;
- usp = sp;
-
-#ifdef CONFIG_EARLY_PRINTK
- SP += -12;
- call _init_early_exception_vectors;
- SP += 12;
-#endif
-
- /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
- call _bf53x_relocate_l1_mem;
-#if CONFIG_BFIN_KERNEL_CLOCK
- call _start_dma_code;
-#endif
-
- /* Code for initializing Async memory banks */
-
- p2.h = hi(EBIU_AMBCTL1);
- p2.l = lo(EBIU_AMBCTL1);
- r0.h = hi(AMBCTL1VAL);
- r0.l = lo(AMBCTL1VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMBCTL0);
- p2.l = lo(EBIU_AMBCTL0);
- r0.h = hi(AMBCTL0VAL);
- r0.l = lo(AMBCTL0VAL);
- [p2] = r0;
- ssync;
-
- p2.h = hi(EBIU_AMGCTL);
- p2.l = lo(EBIU_AMGCTL);
- r0 = AMGCTLVAL;
- w[p2] = r0;
- ssync;
-
- /* This section keeps the processor in supervisor mode
- * during kernel boot. Switches to user mode at end of boot.
- * See page 3-9 of Hardware Reference manual for documentation.
- */
-
- /* EVT15 = _real_start */
-
- p0.l = lo(EVT15);
- p0.h = hi(EVT15);
- p1.l = _real_start;
- p1.h = _real_start;
- [p0] = p1;
- csync;
-
- p0.l = lo(IMASK);
- p0.h = hi(IMASK);
- p1.l = IMASK_IVG15;
- p1.h = 0x0;
- [p0] = p1;
- csync;
-
- raise 15;
- p0.l = .LWAIT_HERE;
- p0.h = .LWAIT_HERE;
- reti = p0;
-#if ANOMALY_05000281
- nop; nop; nop;
-#endif
- rti;
-
-.LWAIT_HERE:
- jump .LWAIT_HERE;
-ENDPROC(__start)
-
-ENTRY(_real_start)
- [ -- sp ] = reti;
- p0.l = lo(WDOGA_CTL);
- p0.h = hi(WDOGA_CTL);
- r0 = 0xAD6(z);
- w[p0] = r0; /* watchdog off for now */
- ssync;
-
- /* Code update for BSS size == 0
- * Zero out the bss region.
- */
-
- p1.l = ___bss_start;
- p1.h = ___bss_start;
- p2.l = ___bss_stop;
- p2.h = ___bss_stop;
- r0 = 0;
- p2 -= p1;
- lsetup (.L_clear_bss, .L_clear_bss) lc0 = p2;
-.L_clear_bss:
- B[p1++] = r0;
-
- /* In case there is a NULL pointer reference
- * Zero out region before stext
- */
-
- p1.l = 0x0;
- p1.h = 0x0;
- r0.l = __stext;
- r0.h = __stext;
- r0 = r0 >> 1;
- p2 = r0;
- r0 = 0;
- lsetup (.L_clear_zero, .L_clear_zero) lc0 = p2;
-.L_clear_zero:
- W[p1++] = r0;
-
- /* pass the uboot arguments to the global value command line */
- R0 = R7;
- call _cmdline_init;
-
- p1.l = __rambase;
- p1.h = __rambase;
- r0.l = __sdata;
- r0.h = __sdata;
- [p1] = r0;
-
- p1.l = __ramstart;
- p1.h = __ramstart;
- p3.l = ___bss_stop;
- p3.h = ___bss_stop;
-
- r1 = p3;
- [p1] = r1;
-
- /*
- * load the current thread pointer and stack
- */
- r1.l = _init_thread_union;
- r1.h = _init_thread_union;
-
- r2.l = 0x2000;
- r2.h = 0x0000;
- r1 = r1 + r2;
- sp = r1;
- usp = sp;
- fp = sp;
- jump.l _start_kernel;
-ENDPROC(_real_start)
-
-__FINIT
-
.section .l1.text
-#if CONFIG_BFIN_KERNEL_CLOCK
+#ifdef CONFIG_BFIN_KERNEL_CLOCK
ENTRY(_start_dma_code)
p0.h = hi(SICA_IWR0);
p0.l = lo(SICA_IWR0);
diff --git a/arch/blackfin/mach-bf561/ints-priority.c b/arch/blackfin/mach-bf561/ints-priority.c
index 09b541b0f7c2..9d2f23344720 100644
--- a/arch/blackfin/mach-bf561/ints-priority.c
+++ b/arch/blackfin/mach-bf561/ints-priority.c
@@ -31,7 +31,7 @@
#include <linux/irq.h>
#include <asm/blackfin.h>
-void program_IAR(void)
+void __init program_IAR(void)
{
/* Program the IAR0 Register with the configured priority */
bfin_write_SICA_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) |
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index 422bfee34adc..e6ed57c56d4b 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -3,9 +3,10 @@
#
obj-y := \
- cache.o cacheinit.o entry.o \
- interrupt.o lock.o irqpanic.o arch_checks.o ints-priority.o
+ cache.o entry.o head.o \
+ interrupt.o irqpanic.o arch_checks.o ints-priority.o
+obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o
obj-$(CONFIG_PM) += pm.o dpmc_modes.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index f9160d83b91f..5986758b2752 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -27,6 +27,7 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <asm/fixed_code.h>
#include <asm/mach/anomaly.h>
#include <asm/mach-common/clocks.h>
@@ -53,3 +54,11 @@
# endif
#endif /* CONFIG_BFIN_KERNEL_CLOCK */
+
+#if CONFIG_BOOT_LOAD < FIXED_CODE_END
+# error "The kernel load address must be after the fixed code section"
+#endif
+
+#if (CONFIG_BOOT_LOAD & 0x3)
+# error "The kernel load address must be 4 byte aligned"
+#endif
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index 0521b1588204..85f8c79b3c37 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -34,81 +34,6 @@
#include <asm/cache.h>
.text
-.align 2
-ENTRY(_cache_invalidate)
-
- /*
- * Icache or DcacheA or DcacheB Invalidation
- * or any combination thereof
- * R0 has bits
- * CPLB_ENABLE_ICACHE_P,CPLB_ENABLE_DCACHE_P,CPLB_ENABLE_DCACHE2_P
- * set as required
- */
- [--SP] = R7;
-
- R7 = R0;
- CC = BITTST(R7,CPLB_ENABLE_ICACHE_P);
- IF !CC JUMP .Lno_icache;
- [--SP] = RETS;
- CALL _icache_invalidate;
- RETS = [SP++];
-.Lno_icache:
- CC = BITTST(R7,CPLB_ENABLE_DCACHE_P);
- IF !CC JUMP .Lno_dcache_a;
- R0 = 0; /* specifies bank A */
- [--SP] = RETS;
- CALL _dcache_invalidate;
- RETS = [SP++];
-.Lno_dcache_a:
- CC = BITTST(R7,CPLB_ENABLE_DCACHE2_P);
- IF !CC JUMP .Lno_dcache_b;
- R0 = 0;
- BITSET(R0, 23); /* specifies bank B */
- [--SP] = RETS;
- CALL _dcache_invalidate;
- RETS = [SP++];
-.Lno_dcache_b:
- R7 = [SP++];
- RTS;
-ENDPROC(_cache_invalidate)
-
-/* Invalidate the Entire Instruction cache by
- * disabling IMC bit
- */
-ENTRY(_icache_invalidate)
-ENTRY(_invalidate_entire_icache)
- [--SP] = ( R7:5);
-
- P0.L = LO(IMEM_CONTROL);
- P0.H = HI(IMEM_CONTROL);
- R7 = [P0];
-
- /* Clear the IMC bit , All valid bits in the instruction
- * cache are set to the invalid state
- */
- BITCLR(R7,IMC_P);
- CLI R6;
- SSYNC; /* SSYNC required before invalidating cache. */
- .align 8;
- [P0] = R7;
- SSYNC;
- STI R6;
-
- /* Configures the instruction cache agian */
- R6 = (IMC | ENICPLB);
- R7 = R7 | R6;
-
- CLI R6;
- SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
- .align 8;
- [P0] = R7;
- SSYNC;
- STI R6;
-
- ( R7:5) = [SP++];
- RTS;
-ENDPROC(_invalidate_entire_icache)
-ENDPROC(_icache_invalidate)
/*
* blackfin_cache_flush_range(start, end)
@@ -190,46 +115,6 @@ ENTRY(_blackfin_dcache_invalidate_range)
RTS;
ENDPROC(_blackfin_dcache_invalidate_range)
-/* Invalidate the Entire Data cache by
- * clearing DMC[1:0] bits
- */
-ENTRY(_invalidate_entire_dcache)
-ENTRY(_dcache_invalidate)
- [--SP] = ( R7:6);
-
- P0.L = LO(DMEM_CONTROL);
- P0.H = HI(DMEM_CONTROL);
- R7 = [P0];
-
- /* Clear the DMC[1:0] bits, All valid bits in the data
- * cache are set to the invalid state
- */
- BITCLR(R7,DMC0_P);
- BITCLR(R7,DMC1_P);
- CLI R6;
- SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
- .align 8;
- [P0] = R7;
- SSYNC;
- STI R6;
-
- /* Configures the data cache again */
-
- R6 = DMEM_CNTR;
- R7 = R7 | R6;
-
- CLI R6;
- SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
- .align 8;
- [P0] = R7;
- SSYNC;
- STI R6;
-
- ( R7:6) = [SP++];
- RTS;
-ENDPROC(_dcache_invalidate)
-ENDPROC(_invalidate_entire_dcache)
-
ENTRY(_blackfin_dcache_flush_range)
R2 = -L1_CACHE_BYTES;
R2 = R0 & R2;
diff --git a/arch/blackfin/mach-common/cacheinit.S b/arch/blackfin/mach-common/cacheinit.S
deleted file mode 100644
index 22fada0c1cb3..000000000000
--- a/arch/blackfin/mach-common/cacheinit.S
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * File: arch/blackfin/mach-common/cacheinit.S
- * Based on:
- * Author: LG Soft India
- *
- * Created: ?
- * Description: cache initialization
- *
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/* This function sets up the data and instruction cache. The
- * tables like icplb table, dcplb table and Page Descriptor table
- * are defined in cplbtab.h. You can configure those tables for
- * your suitable requirements
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-#if ANOMALY_05000125
-#if defined(CONFIG_BFIN_ICACHE)
-ENTRY(_bfin_write_IMEM_CONTROL)
-
- /* Enable Instruction Cache */
- P0.l = LO(IMEM_CONTROL);
- P0.h = HI(IMEM_CONTROL);
-
- /* Anomaly 05000125 */
- CLI R1;
- SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
- .align 8;
- [P0] = R0;
- SSYNC;
- STI R1;
- RTS;
-
-ENDPROC(_bfin_write_IMEM_CONTROL)
-#endif
-
-#if defined(CONFIG_BFIN_DCACHE)
-ENTRY(_bfin_write_DMEM_CONTROL)
- P0.l = LO(DMEM_CONTROL);
- P0.h = HI(DMEM_CONTROL);
-
- CLI R1;
- SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
- .align 8;
- [P0] = R0;
- SSYNC;
- STI R1;
- RTS;
-
-ENDPROC(_bfin_write_DMEM_CONTROL)
-#endif
-
-#endif
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 5e3f1d8a4fb8..838b0b2ce9a5 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -78,62 +78,6 @@ ENTRY(_hibernate_mode)
jump .Lforever;
ENDPROC(_hibernate_mode)
-ENTRY(_deep_sleep)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
-
- CLI R4;
-
- R0 = IWR_ENABLE(0);
- R1 = IWR_DISABLE_ALL;
- R2 = IWR_DISABLE_ALL;
-
- call _set_sic_iwr;
-
- call _set_dram_srfs;
-
- /* Clear all the interrupts,bits sticky */
- R0 = 0xFFFF (Z);
- call _set_rtc_istat
-
- P0.H = hi(PLL_CTL);
- P0.L = lo(PLL_CTL);
- R0 = W[P0](z);
- BITSET (R0, 5);
- W[P0] = R0.L;
-
- call _test_pll_locked;
-
- SSYNC;
- IDLE;
-
- call _unset_dram_srfs;
-
- call _test_pll_locked;
-
- R0 = IWR_ENABLE(0);
- R1 = IWR_DISABLE_ALL;
- R2 = IWR_DISABLE_ALL;
-
- call _set_sic_iwr;
-
- P0.H = hi(PLL_CTL);
- P0.L = lo(PLL_CTL);
- R0 = w[p0](z);
- BITCLR (R0, 3);
- BITCLR (R0, 5);
- BITCLR (R0, 8);
- w[p0] = R0;
- IDLE;
- call _test_pll_locked;
-
- STI R4;
-
- RETS = [SP++];
- ( R7:0, P5:0 ) = [SP++];
- RTS;
-ENDPROC(_deep_sleep)
-
ENTRY(_sleep_deeper)
[--SP] = ( R7:0, P5:0 );
[--SP] = RETS;
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index eceb484d90f9..117c01c2c6b0 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -158,14 +158,16 @@ ENTRY(_ex_single_step)
cc = r7 == r6;
if cc jump _bfin_return_from_exception;
+#ifdef CONFIG_KGDB
/* Don't do single step in hardware exception handler */
p5.l = lo(IPEND);
p5.h = hi(IPEND);
r6 = [p5];
+ cc = bittst(r6, 4);
+ if cc jump _bfin_return_from_exception;
cc = bittst(r6, 5);
if cc jump _bfin_return_from_exception;
-#ifdef CONFIG_KGDB
/* skip single step if current interrupt priority is higher than
* that of the first instruction, from which gdb starts single step */
r6 >>= 6;
@@ -186,17 +188,27 @@ ENTRY(_ex_single_step)
if cc jump .Ldo_single_step;
r6 += -1;
cc = r6 < r7;
- if cc jump _bfin_return_from_exception;
+ if cc jump 1f;
.Ldo_single_step:
-#endif
-
+#else
/* If we were in user mode, do the single step normally. */
+ p5.l = lo(IPEND);
+ p5.h = hi(IPEND);
r6 = [p5];
r7 = 0xffe0 (z);
r7 = r7 & r6;
cc = r7 == 0;
- if cc jump 1f;
+ if !cc jump 1f;
+#endif
+ /* Single stepping only a single instruction, so clear the trace
+ * bit here. */
+ r7 = syscfg;
+ bitclr (r7, 0);
+ syscfg = R7;
+ jump _ex_trap_c;
+
+1:
/*
* We were in an interrupt handler. By convention, all of them save
* SYSCFG with their first instruction, so by checking whether our
@@ -224,15 +236,11 @@ ENTRY(_ex_single_step)
cc = R7 == R6;
if !cc jump _bfin_return_from_exception;
-1:
- /* Single stepping only a single instruction, so clear the trace
- * bit here. */
r7 = syscfg;
bitclr (r7, 0);
syscfg = R7;
- jump _ex_trap_c;
-
+ /* Fall through to _bfin_return_from_exception. */
ENDPROC(_ex_single_step)
ENTRY(_bfin_return_from_exception)
@@ -1414,6 +1422,12 @@ ENTRY(_sys_call_table)
.long _sys_semtimedop
.long _sys_timerfd_settime
.long _sys_timerfd_gettime
+ .long _sys_signalfd4 /* 360 */
+ .long _sys_eventfd2
+ .long _sys_epoll_create1
+ .long _sys_dup3
+ .long _sys_pipe2
+ .long _sys_inotify_init1 /* 365 */
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
new file mode 100644
index 000000000000..191b4e974c4b
--- /dev/null
+++ b/arch/blackfin/mach-common/head.S
@@ -0,0 +1,207 @@
+/*
+ * Common Blackfin startup code
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/blackfin.h>
+#include <asm/thread_info.h>
+#include <asm/trace.h>
+
+__INIT
+
+#define INITIAL_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
+
+ENTRY(__start)
+ /* R0: argument of command line string, passed from uboot, save it */
+ R7 = R0;
+ /* Enable Cycle Counter and Nesting Of Interrupts */
+#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
+ R0 = SYSCFG_SNEN;
+#else
+ R0 = SYSCFG_SNEN | SYSCFG_CCEN;
+#endif
+ SYSCFG = R0;
+ R0 = 0;
+
+ /* Clear Out All the data and pointer Registers */
+ R1 = R0;
+ R2 = R0;
+ R3 = R0;
+ R4 = R0;
+ R5 = R0;
+ R6 = R0;
+
+ P0 = R0;
+ P1 = R0;
+ P2 = R0;
+ P3 = R0;
+ P4 = R0;
+ P5 = R0;
+
+ LC0 = r0;
+ LC1 = r0;
+ L0 = r0;
+ L1 = r0;
+ L2 = r0;
+ L3 = r0;
+
+ /* Clear Out All the DAG Registers */
+ B0 = r0;
+ B1 = r0;
+ B2 = r0;
+ B3 = r0;
+
+ I0 = r0;
+ I1 = r0;
+ I2 = r0;
+ I3 = r0;
+
+ M0 = r0;
+ M1 = r0;
+ M2 = r0;
+ M3 = r0;
+
+ trace_buffer_init(p0,r0);
+ P0 = R1;
+ R0 = R1;
+
+ /* Turn off the icache */
+ p0.l = LO(IMEM_CONTROL);
+ p0.h = HI(IMEM_CONTROL);
+ R1 = [p0];
+ R0 = ~ENICPLB;
+ R0 = R0 & R1;
+ [p0] = R0;
+ SSYNC;
+
+ /* Turn off the dcache */
+ p0.l = LO(DMEM_CONTROL);
+ p0.h = HI(DMEM_CONTROL);
+ R1 = [p0];
+ R0 = ~ENDCPLB;
+ R0 = R0 & R1;
+ [p0] = R0;
+ SSYNC;
+
+ /* Save RETX, in case of doublefault */
+ p0.l = ___retx;
+ p0.h = ___retx;
+ R0 = RETX;
+ [P0] = R0;
+
+ /* Initialize stack pointer */
+ sp.l = lo(INITIAL_STACK);
+ sp.h = hi(INITIAL_STACK);
+ fp = sp;
+ usp = sp;
+
+#ifdef CONFIG_EARLY_PRINTK
+ call _init_early_exception_vectors;
+#endif
+
+ /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
+ call _bf53x_relocate_l1_mem;
+#ifdef CONFIG_BFIN_KERNEL_CLOCK
+ call _start_dma_code;
+#endif
+
+ /* This section keeps the processor in supervisor mode
+ * during kernel boot. Switches to user mode at end of boot.
+ * See page 3-9 of Hardware Reference manual for documentation.
+ */
+
+ /* EVT15 = _real_start */
+
+ p0.l = lo(EVT15);
+ p0.h = hi(EVT15);
+ p1.l = _real_start;
+ p1.h = _real_start;
+ [p0] = p1;
+ csync;
+
+ p0.l = lo(IMASK);
+ p0.h = hi(IMASK);
+ p1.l = IMASK_IVG15;
+ p1.h = 0x0;
+ [p0] = p1;
+ csync;
+
+ raise 15;
+ p0.l = .LWAIT_HERE;
+ p0.h = .LWAIT_HERE;
+ reti = p0;
+#if ANOMALY_05000281
+ nop; nop; nop;
+#endif
+ rti;
+
+.LWAIT_HERE:
+ jump .LWAIT_HERE;
+ENDPROC(__start)
+
+/* A little BF561 glue ... */
+#ifndef WDOG_CTL
+# define WDOG_CTL WDOGA_CTL
+#endif
+
+ENTRY(_real_start)
+ /* Enable nested interrupts */
+ [--sp] = reti;
+
+ /* watchdog off for now */
+ p0.l = lo(WDOG_CTL);
+ p0.h = hi(WDOG_CTL);
+ r0 = 0xAD6(z);
+ w[p0] = r0;
+ ssync;
+
+ /* Zero out the bss region
+ * Note: this will fail if bss is 0 bytes ...
+ */
+ r0 = 0 (z);
+ r1.l = ___bss_start;
+ r1.h = ___bss_start;
+ r2.l = ___bss_stop;
+ r2.h = ___bss_stop;
+ r2 = r2 - r1;
+ r2 >>= 2;
+ p1 = r1;
+ p2 = r2;
+ lsetup (.L_clear_bss, .L_clear_bss) lc0 = p2;
+.L_clear_bss:
+ [p1++] = r0;
+
+ /* In case there is a NULL pointer reference,
+ * zero out region before stext
+ */
+ p1 = r0;
+ r2.l = __stext;
+ r2.h = __stext;
+ r2 >>= 2;
+ p2 = r2;
+ lsetup (.L_clear_zero, .L_clear_zero) lc0 = p2;
+.L_clear_zero:
+ [p1++] = r0;
+
+ /* Pass the u-boot arguments to the global command line value */
+ R0 = R7;
+ call _cmdline_init;
+
+ /* Load the current thread pointer and stack */
+ sp.l = _init_thread_union;
+ sp.h = _init_thread_union;
+ p1 = THREAD_SIZE (z);
+ sp = sp + p1;
+ usp = sp;
+ fp = sp;
+ jump.l _start_kernel;
+ENDPROC(_real_start)
+
+__FINIT
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 64d746114e4b..62f8883a5c27 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -71,6 +71,7 @@ atomic_t num_spurious;
#ifdef CONFIG_PM
unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */
+unsigned vr_wakeup;
#endif
struct ivgx {
@@ -184,17 +185,56 @@ static void bfin_internal_unmask_irq(unsigned int irq)
#ifdef CONFIG_PM
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
- unsigned bank, bit;
+ unsigned bank, bit, wakeup = 0;
unsigned long flags;
bank = SIC_SYSIRQ(irq) / 32;
bit = SIC_SYSIRQ(irq) % 32;
+ switch (irq) {
+#ifdef IRQ_RTC
+ case IRQ_RTC:
+ wakeup |= WAKE;
+ break;
+#endif
+#ifdef IRQ_CAN0_RX
+ case IRQ_CAN0_RX:
+ wakeup |= CANWE;
+ break;
+#endif
+#ifdef IRQ_CAN1_RX
+ case IRQ_CAN1_RX:
+ wakeup |= CANWE;
+ break;
+#endif
+#ifdef IRQ_USB_INT0
+ case IRQ_USB_INT0:
+ wakeup |= USBWE;
+ break;
+#endif
+#ifdef IRQ_KEY
+ case IRQ_KEY:
+ wakeup |= KPADWE;
+ break;
+#endif
+#ifdef IRQ_CNT
+ case IRQ_CNT:
+ wakeup |= ROTWE;
+ break;
+#endif
+ default:
+ break;
+ }
+
local_irq_save(flags);
- if (state)
+ if (state) {
bfin_sic_iwr[bank] |= (1 << bit);
- else
+ vr_wakeup |= wakeup;
+
+ } else {
bfin_sic_iwr[bank] &= ~(1 << bit);
+ vr_wakeup &= ~wakeup;
+ }
local_irq_restore(flags);
@@ -943,6 +983,11 @@ int __init init_arch_irq(void)
local_irq_disable();
+#if defined(CONFIG_BF527) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
+ /* Clear EMAC Interrupt Status bits so we can demux them later */
+ bfin_write_EMAC_SYSTAT(-1);
+#endif
+
#ifdef CONFIG_BF54x
# ifdef CONFIG_PINTx_REASSIGN
pint[0]->assign = CONFIG_PINT0_ASSIGN;
@@ -1028,13 +1073,22 @@ int __init init_arch_irq(void)
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
- bfin_write_SIC_IWR0(IWR_ENABLE_ALL);
- bfin_write_SIC_IWR1(IWR_ENABLE_ALL);
+ bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
+#if defined(CONFIG_BF52x)
+ /* BF52x system reset does not properly reset SIC_IWR1 which
+ * will screw up the bootrom as it relies on MDMA0/1 waking it
+ * up from IDLE instructions. See this report for more info:
+ * http://blackfin.uclinux.org/gf/tracker/4323
+ */
+ bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
+#else
+ bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
+#endif
# ifdef CONFIG_BF54x
- bfin_write_SIC_IWR2(IWR_ENABLE_ALL);
+ bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
- bfin_write_SIC_IWR(IWR_ENABLE_ALL);
+ bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
return 0;
diff --git a/arch/blackfin/mach-common/lock.S b/arch/blackfin/mach-common/lock.S
index 30b887e67dd6..9daf01201e9f 100644
--- a/arch/blackfin/mach-common/lock.S
+++ b/arch/blackfin/mach-common/lock.S
@@ -28,13 +28,10 @@
*/
#include <linux/linkage.h>
-#include <asm/cplb.h>
#include <asm/blackfin.h>
.text
-#ifdef CONFIG_BFIN_ICACHE_LOCK
-
/* When you come here, it is assumed that
* R0 - Which way to be locked
*/
@@ -189,18 +186,38 @@ ENTRY(_cache_lock)
RTS;
ENDPROC(_cache_lock)
-#endif /* BFIN_ICACHE_LOCK */
-
-/* Return the ILOC bits of IMEM_CONTROL
+/* Invalidate the entire instruction cache by
+ * disabling the IMC bit
*/
+ENTRY(_invalidate_entire_icache)
+ [--SP] = ( R7:5);
-ENTRY(_read_iloc)
- P1.H = HI(IMEM_CONTROL);
- P1.L = LO(IMEM_CONTROL);
- R1 = 0xF;
- R0 = [P1];
- R0 = R0 >> 3;
- R0 = R0 & R1;
+ P0.L = LO(IMEM_CONTROL);
+ P0.H = HI(IMEM_CONTROL);
+ R7 = [P0];
+
+ /* Clear the IMC bit; all valid bits in the instruction
+ * cache are set to the invalid state
+ */
+ BITCLR(R7,IMC_P);
+ CLI R6;
+ SSYNC; /* SSYNC required before invalidating cache. */
+ .align 8;
+ [P0] = R7;
+ SSYNC;
+ STI R6;
+
+ /* Configure the instruction cache again */
+ R6 = (IMC | ENICPLB);
+ R7 = R7 | R6;
+
+ CLI R6;
+ SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
+ .align 8;
+ [P0] = R7;
+ SSYNC;
+ STI R6;
+ ( R7:5) = [SP++];
RTS;
-ENDPROC(_read_iloc)
+ENDPROC(_invalidate_entire_icache)
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 4fe6a2366b13..e28c6af1f415 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -83,13 +83,22 @@ void bfin_pm_suspend_standby_enter(void)
bfin_pm_standby_restore();
#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
- bfin_write_SIC_IWR0(IWR_ENABLE_ALL);
- bfin_write_SIC_IWR1(IWR_ENABLE_ALL);
+ bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
+#if defined(CONFIG_BF52x)
+ /* BF52x system reset does not properly reset SIC_IWR1 which
+ * will screw up the bootrom as it relies on MDMA0/1 waking it
+ * up from IDLE instructions. See this report for more info:
+ * http://blackfin.uclinux.org/gf/tracker/4323
+ */
+ bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
+#else
+ bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
+#endif
# ifdef CONFIG_BF54x
- bfin_write_SIC_IWR2(IWR_ENABLE_ALL);
+ bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
- bfin_write_SIC_IWR(IWR_ENABLE_ALL);
+ bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
local_irq_restore(flags);
@@ -229,28 +238,12 @@ int bfin_pm_suspend_mem_enter(void)
wakeup = bfin_read_VR_CTL() & ~FREQ;
wakeup |= SCKELOW;
- /* FIXME: merge this somehow with set_irq_wake */
-#ifdef CONFIG_PM_BFIN_WAKE_RTC
- wakeup |= WAKE;
-#endif
#ifdef CONFIG_PM_BFIN_WAKE_PH6
wakeup |= PHYWE;
#endif
-#ifdef CONFIG_PM_BFIN_WAKE_CAN
- wakeup |= CANWE;
-#endif
#ifdef CONFIG_PM_BFIN_WAKE_GP
wakeup |= GPWE;
#endif
-#ifdef CONFIG_PM_BFIN_WAKE_USB
- wakeup |= USBWE;
-#endif
-#ifdef CONFIG_PM_BFIN_WAKE_KEYPAD
- wakeup |= KPADWE;
-#endif
-#ifdef CONFIG_PM_BFIN_WAKE_ROTARY
- wakeup |= ROTWE;
-#endif
local_irq_save(flags);
@@ -268,7 +261,7 @@ int bfin_pm_suspend_mem_enter(void)
icache_disable();
bf53x_suspend_l1_mem(memptr);
- do_hibernate(wakeup); /* Goodbye */
+ do_hibernate(wakeup | vr_wakeup); /* Goodbye */
bf53x_resume_l1_mem(memptr);
diff --git a/arch/blackfin/mm/blackfin_sram.c b/arch/blackfin/mm/blackfin_sram.c
index 5af3c31c9365..9d2be43ac3da 100644
--- a/arch/blackfin/mm/blackfin_sram.c
+++ b/arch/blackfin/mm/blackfin_sram.c
@@ -66,7 +66,7 @@ static struct sram_piece free_l1_data_B_sram_head, used_l1_data_B_sram_head;
static struct sram_piece free_l1_inst_sram_head, used_l1_inst_sram_head;
#endif
-#ifdef L2_LENGTH
+#if L2_LENGTH != 0
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif
@@ -175,7 +175,7 @@ static void __init l1_inst_sram_init(void)
static void __init l2_sram_init(void)
{
-#ifdef L2_LENGTH
+#if L2_LENGTH != 0
free_l2_sram_head.next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!free_l2_sram_head.next) {
@@ -367,7 +367,7 @@ int sram_free(const void *addr)
&& addr < (void *)(L1_DATA_B_START + L1_DATA_B_LENGTH))
return l1_data_B_sram_free(addr);
#endif
-#ifdef L2_LENGTH
+#if L2_LENGTH != 0
else if (addr >= (void *)L2_START
&& addr < (void *)(L2_START + L2_LENGTH))
return l2_sram_free(addr);
@@ -604,7 +604,7 @@ int l1sram_free(const void *addr)
void *l2_sram_alloc(size_t size)
{
-#ifdef L2_LENGTH
+#if L2_LENGTH != 0
unsigned flags;
void *addr;
@@ -640,7 +640,7 @@ EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
-#ifdef L2_LENGTH
+#if L2_LENGTH != 0
unsigned flags;
int ret;
@@ -779,7 +779,7 @@ static int sram_proc_read(char *buf, char **start, off_t offset, int count,
&free_l1_inst_sram_head, &used_l1_inst_sram_head))
goto not_done;
#endif
-#ifdef L2_LENGTH
+#if L2_LENGTH != 0
if (_sram_proc_read(buf, &len, count, "L2",
&free_l2_sram_head, &used_l2_sram_head))
goto not_done;
diff --git a/include/asm-h8300/Kbuild b/arch/h8300/include/asm/Kbuild
index c68e1680da01..c68e1680da01 100644
--- a/include/asm-h8300/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
diff --git a/include/asm-h8300/a.out.h b/arch/h8300/include/asm/a.out.h
index ded780f0a492..ded780f0a492 100644
--- a/include/asm-h8300/a.out.h
+++ b/arch/h8300/include/asm/a.out.h
diff --git a/include/asm-h8300/atomic.h b/arch/h8300/include/asm/atomic.h
index b4cf0ea97ede..b4cf0ea97ede 100644
--- a/include/asm-h8300/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
diff --git a/include/asm-h8300/auxvec.h b/arch/h8300/include/asm/auxvec.h
index 1d36fe38b088..1d36fe38b088 100644
--- a/include/asm-h8300/auxvec.h
+++ b/arch/h8300/include/asm/auxvec.h
diff --git a/include/asm-h8300/bitops.h b/arch/h8300/include/asm/bitops.h
index cb18e3b0aa94..cb18e3b0aa94 100644
--- a/include/asm-h8300/bitops.h
+++ b/arch/h8300/include/asm/bitops.h
diff --git a/include/asm-h8300/bootinfo.h b/arch/h8300/include/asm/bootinfo.h
index 5bed7e7aac0a..5bed7e7aac0a 100644
--- a/include/asm-h8300/bootinfo.h
+++ b/arch/h8300/include/asm/bootinfo.h
diff --git a/include/asm-h8300/bug.h b/arch/h8300/include/asm/bug.h
index edddf5b086e5..edddf5b086e5 100644
--- a/include/asm-h8300/bug.h
+++ b/arch/h8300/include/asm/bug.h
diff --git a/include/asm-h8300/bugs.h b/arch/h8300/include/asm/bugs.h
index 1cb4afba6eb1..1cb4afba6eb1 100644
--- a/include/asm-h8300/bugs.h
+++ b/arch/h8300/include/asm/bugs.h
diff --git a/include/asm-h8300/byteorder.h b/arch/h8300/include/asm/byteorder.h
index 36e597d61619..36e597d61619 100644
--- a/include/asm-h8300/byteorder.h
+++ b/arch/h8300/include/asm/byteorder.h
diff --git a/include/asm-h8300/cache.h b/arch/h8300/include/asm/cache.h
index c6350283649d..c6350283649d 100644
--- a/include/asm-h8300/cache.h
+++ b/arch/h8300/include/asm/cache.h
diff --git a/include/asm-h8300/cachectl.h b/arch/h8300/include/asm/cachectl.h
index c464022d8e26..c464022d8e26 100644
--- a/include/asm-h8300/cachectl.h
+++ b/arch/h8300/include/asm/cachectl.h
diff --git a/include/asm-h8300/cacheflush.h b/arch/h8300/include/asm/cacheflush.h
index 5ffdca217b95..5ffdca217b95 100644
--- a/include/asm-h8300/cacheflush.h
+++ b/arch/h8300/include/asm/cacheflush.h
diff --git a/include/asm-h8300/checksum.h b/arch/h8300/include/asm/checksum.h
index 98724e12508c..98724e12508c 100644
--- a/include/asm-h8300/checksum.h
+++ b/arch/h8300/include/asm/checksum.h
diff --git a/include/asm-h8300/cputime.h b/arch/h8300/include/asm/cputime.h
index 092e187c7b08..092e187c7b08 100644
--- a/include/asm-h8300/cputime.h
+++ b/arch/h8300/include/asm/cputime.h
diff --git a/include/asm-h8300/current.h b/arch/h8300/include/asm/current.h
index 57d74ee55a14..57d74ee55a14 100644
--- a/include/asm-h8300/current.h
+++ b/arch/h8300/include/asm/current.h
diff --git a/include/asm-h8300/dbg.h b/arch/h8300/include/asm/dbg.h
index 2c6d1cbcf736..2c6d1cbcf736 100644
--- a/include/asm-h8300/dbg.h
+++ b/arch/h8300/include/asm/dbg.h
diff --git a/include/asm-h8300/delay.h b/arch/h8300/include/asm/delay.h
index 743beba70f82..743beba70f82 100644
--- a/include/asm-h8300/delay.h
+++ b/arch/h8300/include/asm/delay.h
diff --git a/include/asm-h8300/device.h b/arch/h8300/include/asm/device.h
index d8f9872b0e2d..d8f9872b0e2d 100644
--- a/include/asm-h8300/device.h
+++ b/arch/h8300/include/asm/device.h
diff --git a/include/asm-h8300/div64.h b/arch/h8300/include/asm/div64.h
index 6cd978cefb28..6cd978cefb28 100644
--- a/include/asm-h8300/div64.h
+++ b/arch/h8300/include/asm/div64.h
diff --git a/include/asm-h8300/dma.h b/arch/h8300/include/asm/dma.h
index 3edbaaaedf5b..3edbaaaedf5b 100644
--- a/include/asm-h8300/dma.h
+++ b/arch/h8300/include/asm/dma.h
diff --git a/include/asm-h8300/elf.h b/arch/h8300/include/asm/elf.h
index a8b57d1f4128..a8b57d1f4128 100644
--- a/include/asm-h8300/elf.h
+++ b/arch/h8300/include/asm/elf.h
diff --git a/include/asm-h8300/emergency-restart.h b/arch/h8300/include/asm/emergency-restart.h
index 108d8c48e42e..108d8c48e42e 100644
--- a/include/asm-h8300/emergency-restart.h
+++ b/arch/h8300/include/asm/emergency-restart.h
diff --git a/include/asm-h8300/errno.h b/arch/h8300/include/asm/errno.h
index 0c2f5641fdcc..0c2f5641fdcc 100644
--- a/include/asm-h8300/errno.h
+++ b/arch/h8300/include/asm/errno.h
diff --git a/include/asm-h8300/fb.h b/arch/h8300/include/asm/fb.h
index c7df38030992..c7df38030992 100644
--- a/include/asm-h8300/fb.h
+++ b/arch/h8300/include/asm/fb.h
diff --git a/include/asm-h8300/fcntl.h b/arch/h8300/include/asm/fcntl.h
index 1952cb2e3b06..1952cb2e3b06 100644
--- a/include/asm-h8300/fcntl.h
+++ b/arch/h8300/include/asm/fcntl.h
diff --git a/include/asm-h8300/flat.h b/arch/h8300/include/asm/flat.h
index 2a873508a9a1..2a873508a9a1 100644
--- a/include/asm-h8300/flat.h
+++ b/arch/h8300/include/asm/flat.h
diff --git a/include/asm-h8300/fpu.h b/arch/h8300/include/asm/fpu.h
index 4fc416e80bef..4fc416e80bef 100644
--- a/include/asm-h8300/fpu.h
+++ b/arch/h8300/include/asm/fpu.h
diff --git a/include/asm-h8300/futex.h b/arch/h8300/include/asm/futex.h
index 6a332a9f099c..6a332a9f099c 100644
--- a/include/asm-h8300/futex.h
+++ b/arch/h8300/include/asm/futex.h
diff --git a/include/asm-h8300/gpio.h b/arch/h8300/include/asm/gpio.h
index a714f0c0efbc..a714f0c0efbc 100644
--- a/include/asm-h8300/gpio.h
+++ b/arch/h8300/include/asm/gpio.h
diff --git a/include/asm-h8300/hardirq.h b/arch/h8300/include/asm/hardirq.h
index 9d7f7a7462b2..9d7f7a7462b2 100644
--- a/include/asm-h8300/hardirq.h
+++ b/arch/h8300/include/asm/hardirq.h
diff --git a/include/asm-h8300/hw_irq.h b/arch/h8300/include/asm/hw_irq.h
index d75a5a1119e8..d75a5a1119e8 100644
--- a/include/asm-h8300/hw_irq.h
+++ b/arch/h8300/include/asm/hw_irq.h
diff --git a/include/asm-h8300/io.h b/arch/h8300/include/asm/io.h
index 26dc6ccd9441..26dc6ccd9441 100644
--- a/include/asm-h8300/io.h
+++ b/arch/h8300/include/asm/io.h
diff --git a/include/asm-h8300/ioctl.h b/arch/h8300/include/asm/ioctl.h
index b279fe06dfe5..b279fe06dfe5 100644
--- a/include/asm-h8300/ioctl.h
+++ b/arch/h8300/include/asm/ioctl.h
diff --git a/include/asm-h8300/ioctls.h b/arch/h8300/include/asm/ioctls.h
index 98a53d067269..98a53d067269 100644
--- a/include/asm-h8300/ioctls.h
+++ b/arch/h8300/include/asm/ioctls.h
diff --git a/include/asm-h8300/ipcbuf.h b/arch/h8300/include/asm/ipcbuf.h
index 2cd1ebcc109d..2cd1ebcc109d 100644
--- a/include/asm-h8300/ipcbuf.h
+++ b/arch/h8300/include/asm/ipcbuf.h
diff --git a/include/asm-h8300/irq.h b/arch/h8300/include/asm/irq.h
index 13d7c601cd0a..13d7c601cd0a 100644
--- a/include/asm-h8300/irq.h
+++ b/arch/h8300/include/asm/irq.h
diff --git a/include/asm-h8300/irq_regs.h b/arch/h8300/include/asm/irq_regs.h
index 3dd9c0b70270..3dd9c0b70270 100644
--- a/include/asm-h8300/irq_regs.h
+++ b/arch/h8300/include/asm/irq_regs.h
diff --git a/include/asm-h8300/kdebug.h b/arch/h8300/include/asm/kdebug.h
index 6ece1b037665..6ece1b037665 100644
--- a/include/asm-h8300/kdebug.h
+++ b/arch/h8300/include/asm/kdebug.h
diff --git a/include/asm-h8300/kmap_types.h b/arch/h8300/include/asm/kmap_types.h
index 1ec8a3427120..1ec8a3427120 100644
--- a/include/asm-h8300/kmap_types.h
+++ b/arch/h8300/include/asm/kmap_types.h
diff --git a/include/asm-h8300/linkage.h b/arch/h8300/include/asm/linkage.h
index 6f4df7d46180..6f4df7d46180 100644
--- a/include/asm-h8300/linkage.h
+++ b/arch/h8300/include/asm/linkage.h
diff --git a/include/asm-h8300/local.h b/arch/h8300/include/asm/local.h
index fdd4efe437cd..fdd4efe437cd 100644
--- a/include/asm-h8300/local.h
+++ b/arch/h8300/include/asm/local.h
diff --git a/include/asm-h8300/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h
index ab9d9646d241..ab9d9646d241 100644
--- a/include/asm-h8300/mc146818rtc.h
+++ b/arch/h8300/include/asm/mc146818rtc.h
diff --git a/include/asm-h8300/md.h b/arch/h8300/include/asm/md.h
index 1a47dc6691fb..1a47dc6691fb 100644
--- a/include/asm-h8300/md.h
+++ b/arch/h8300/include/asm/md.h
diff --git a/include/asm-h8300/mman.h b/arch/h8300/include/asm/mman.h
index b9f104f22a36..b9f104f22a36 100644
--- a/include/asm-h8300/mman.h
+++ b/arch/h8300/include/asm/mman.h
diff --git a/include/asm-h8300/mmu.h b/arch/h8300/include/asm/mmu.h
index 2ce06ea46104..2ce06ea46104 100644
--- a/include/asm-h8300/mmu.h
+++ b/arch/h8300/include/asm/mmu.h
diff --git a/include/asm-h8300/mmu_context.h b/arch/h8300/include/asm/mmu_context.h
index f44b730da54d..f44b730da54d 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/arch/h8300/include/asm/mmu_context.h
diff --git a/include/asm-h8300/module.h b/arch/h8300/include/asm/module.h
index de23231f3196..de23231f3196 100644
--- a/include/asm-h8300/module.h
+++ b/arch/h8300/include/asm/module.h
diff --git a/include/asm-h8300/msgbuf.h b/arch/h8300/include/asm/msgbuf.h
index 6b148cd09aa5..6b148cd09aa5 100644
--- a/include/asm-h8300/msgbuf.h
+++ b/arch/h8300/include/asm/msgbuf.h
diff --git a/include/asm-h8300/mutex.h b/arch/h8300/include/asm/mutex.h
index 458c1f7fbc18..458c1f7fbc18 100644
--- a/include/asm-h8300/mutex.h
+++ b/arch/h8300/include/asm/mutex.h
diff --git a/include/asm-h8300/page.h b/arch/h8300/include/asm/page.h
index 0b6acf0b03aa..0b6acf0b03aa 100644
--- a/include/asm-h8300/page.h
+++ b/arch/h8300/include/asm/page.h
diff --git a/include/asm-h8300/page_offset.h b/arch/h8300/include/asm/page_offset.h
index f8706463008c..f8706463008c 100644
--- a/include/asm-h8300/page_offset.h
+++ b/arch/h8300/include/asm/page_offset.h
diff --git a/include/asm-h8300/param.h b/arch/h8300/include/asm/param.h
index 1c72fb8080ff..1c72fb8080ff 100644
--- a/include/asm-h8300/param.h
+++ b/arch/h8300/include/asm/param.h
diff --git a/include/asm-h8300/pci.h b/arch/h8300/include/asm/pci.h
index 97389b35aa35..97389b35aa35 100644
--- a/include/asm-h8300/pci.h
+++ b/arch/h8300/include/asm/pci.h
diff --git a/include/asm-h8300/percpu.h b/arch/h8300/include/asm/percpu.h
index 72c03e3666d8..72c03e3666d8 100644
--- a/include/asm-h8300/percpu.h
+++ b/arch/h8300/include/asm/percpu.h
diff --git a/include/asm-h8300/pgalloc.h b/arch/h8300/include/asm/pgalloc.h
index c2e89a286d23..c2e89a286d23 100644
--- a/include/asm-h8300/pgalloc.h
+++ b/arch/h8300/include/asm/pgalloc.h
diff --git a/include/asm-h8300/pgtable.h b/arch/h8300/include/asm/pgtable.h
index a09230a08e02..a09230a08e02 100644
--- a/include/asm-h8300/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
diff --git a/include/asm-h8300/poll.h b/arch/h8300/include/asm/poll.h
index f61540c22d94..f61540c22d94 100644
--- a/include/asm-h8300/poll.h
+++ b/arch/h8300/include/asm/poll.h
diff --git a/include/asm-h8300/posix_types.h b/arch/h8300/include/asm/posix_types.h
index 5c553927fc53..5c553927fc53 100644
--- a/include/asm-h8300/posix_types.h
+++ b/arch/h8300/include/asm/posix_types.h
diff --git a/include/asm-h8300/processor.h b/arch/h8300/include/asm/processor.h
index 69e8a34eb6d5..69e8a34eb6d5 100644
--- a/include/asm-h8300/processor.h
+++ b/arch/h8300/include/asm/processor.h
diff --git a/include/asm-h8300/ptrace.h b/arch/h8300/include/asm/ptrace.h
index c2e05e4b512e..c2e05e4b512e 100644
--- a/include/asm-h8300/ptrace.h
+++ b/arch/h8300/include/asm/ptrace.h
diff --git a/include/asm-h8300/regs267x.h b/arch/h8300/include/asm/regs267x.h
index 1bff731a9f77..1bff731a9f77 100644
--- a/include/asm-h8300/regs267x.h
+++ b/arch/h8300/include/asm/regs267x.h
diff --git a/include/asm-h8300/regs306x.h b/arch/h8300/include/asm/regs306x.h
index 027dd633fa25..027dd633fa25 100644
--- a/include/asm-h8300/regs306x.h
+++ b/arch/h8300/include/asm/regs306x.h
diff --git a/include/asm-h8300/resource.h b/arch/h8300/include/asm/resource.h
index 46c5f4391607..46c5f4391607 100644
--- a/include/asm-h8300/resource.h
+++ b/arch/h8300/include/asm/resource.h
diff --git a/include/asm-h8300/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
index d3ecdd87ac90..d3ecdd87ac90 100644
--- a/include/asm-h8300/scatterlist.h
+++ b/arch/h8300/include/asm/scatterlist.h
diff --git a/include/asm-h8300/sections.h b/arch/h8300/include/asm/sections.h
index a81743e8b743..a81743e8b743 100644
--- a/include/asm-h8300/sections.h
+++ b/arch/h8300/include/asm/sections.h
diff --git a/include/asm-h8300/segment.h b/arch/h8300/include/asm/segment.h
index b79a82d0f99d..b79a82d0f99d 100644
--- a/include/asm-h8300/segment.h
+++ b/arch/h8300/include/asm/segment.h
diff --git a/include/asm-h8300/sembuf.h b/arch/h8300/include/asm/sembuf.h
index e04a3ec0cb92..e04a3ec0cb92 100644
--- a/include/asm-h8300/sembuf.h
+++ b/arch/h8300/include/asm/sembuf.h
diff --git a/include/asm-h8300/setup.h b/arch/h8300/include/asm/setup.h
index e2c600e96733..e2c600e96733 100644
--- a/include/asm-h8300/setup.h
+++ b/arch/h8300/include/asm/setup.h
diff --git a/include/asm-h8300/sh_bios.h b/arch/h8300/include/asm/sh_bios.h
index b6bb6e58295c..b6bb6e58295c 100644
--- a/include/asm-h8300/sh_bios.h
+++ b/arch/h8300/include/asm/sh_bios.h
diff --git a/include/asm-h8300/shm.h b/arch/h8300/include/asm/shm.h
index ed6623c0545d..ed6623c0545d 100644
--- a/include/asm-h8300/shm.h
+++ b/arch/h8300/include/asm/shm.h
diff --git a/include/asm-h8300/shmbuf.h b/arch/h8300/include/asm/shmbuf.h
index 64e77993a7a9..64e77993a7a9 100644
--- a/include/asm-h8300/shmbuf.h
+++ b/arch/h8300/include/asm/shmbuf.h
diff --git a/include/asm-h8300/shmparam.h b/arch/h8300/include/asm/shmparam.h
index d1863953ec64..d1863953ec64 100644
--- a/include/asm-h8300/shmparam.h
+++ b/arch/h8300/include/asm/shmparam.h
diff --git a/include/asm-h8300/sigcontext.h b/arch/h8300/include/asm/sigcontext.h
index e4b81505f8f8..e4b81505f8f8 100644
--- a/include/asm-h8300/sigcontext.h
+++ b/arch/h8300/include/asm/sigcontext.h
diff --git a/include/asm-h8300/siginfo.h b/arch/h8300/include/asm/siginfo.h
index bc8fbea931a5..bc8fbea931a5 100644
--- a/include/asm-h8300/siginfo.h
+++ b/arch/h8300/include/asm/siginfo.h
diff --git a/include/asm-h8300/signal.h b/arch/h8300/include/asm/signal.h
index 7bc15048a64f..7bc15048a64f 100644
--- a/include/asm-h8300/signal.h
+++ b/arch/h8300/include/asm/signal.h
diff --git a/include/asm-h8300/smp.h b/arch/h8300/include/asm/smp.h
index 9e9bd7e58922..9e9bd7e58922 100644
--- a/include/asm-h8300/smp.h
+++ b/arch/h8300/include/asm/smp.h
diff --git a/include/asm-h8300/socket.h b/arch/h8300/include/asm/socket.h
index da2520dbf254..da2520dbf254 100644
--- a/include/asm-h8300/socket.h
+++ b/arch/h8300/include/asm/socket.h
diff --git a/include/asm-h8300/sockios.h b/arch/h8300/include/asm/sockios.h
index e9c7ec810c23..e9c7ec810c23 100644
--- a/include/asm-h8300/sockios.h
+++ b/arch/h8300/include/asm/sockios.h
diff --git a/include/asm-h8300/spinlock.h b/arch/h8300/include/asm/spinlock.h
index d5407fa173e4..d5407fa173e4 100644
--- a/include/asm-h8300/spinlock.h
+++ b/arch/h8300/include/asm/spinlock.h
diff --git a/include/asm-h8300/stat.h b/arch/h8300/include/asm/stat.h
index 62c3cc24dfe6..62c3cc24dfe6 100644
--- a/include/asm-h8300/stat.h
+++ b/arch/h8300/include/asm/stat.h
diff --git a/include/asm-h8300/statfs.h b/arch/h8300/include/asm/statfs.h
index b96efa712aac..b96efa712aac 100644
--- a/include/asm-h8300/statfs.h
+++ b/arch/h8300/include/asm/statfs.h
diff --git a/include/asm-h8300/string.h b/arch/h8300/include/asm/string.h
index ca5034897d87..ca5034897d87 100644
--- a/include/asm-h8300/string.h
+++ b/arch/h8300/include/asm/string.h
diff --git a/include/asm-h8300/system.h b/arch/h8300/include/asm/system.h
index 4b8e475908ae..4b8e475908ae 100644
--- a/include/asm-h8300/system.h
+++ b/arch/h8300/include/asm/system.h
diff --git a/include/asm-h8300/target_time.h b/arch/h8300/include/asm/target_time.h
index 9f2a9aa1fe6f..9f2a9aa1fe6f 100644
--- a/include/asm-h8300/target_time.h
+++ b/arch/h8300/include/asm/target_time.h
diff --git a/include/asm-h8300/termbits.h b/arch/h8300/include/asm/termbits.h
index 31eca81db3f7..31eca81db3f7 100644
--- a/include/asm-h8300/termbits.h
+++ b/arch/h8300/include/asm/termbits.h
diff --git a/include/asm-h8300/termios.h b/arch/h8300/include/asm/termios.h
index 70eea64b4213..70eea64b4213 100644
--- a/include/asm-h8300/termios.h
+++ b/arch/h8300/include/asm/termios.h
diff --git a/include/asm-h8300/thread_info.h b/arch/h8300/include/asm/thread_info.h
index aafd4d322ec3..aafd4d322ec3 100644
--- a/include/asm-h8300/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
diff --git a/include/asm-h8300/timex.h b/arch/h8300/include/asm/timex.h
index 23e67013439f..23e67013439f 100644
--- a/include/asm-h8300/timex.h
+++ b/arch/h8300/include/asm/timex.h
diff --git a/include/asm-h8300/tlb.h b/arch/h8300/include/asm/tlb.h
index 3dea80ad9e6f..3dea80ad9e6f 100644
--- a/include/asm-h8300/tlb.h
+++ b/arch/h8300/include/asm/tlb.h
diff --git a/include/asm-h8300/tlbflush.h b/arch/h8300/include/asm/tlbflush.h
index 41c148a9208e..41c148a9208e 100644
--- a/include/asm-h8300/tlbflush.h
+++ b/arch/h8300/include/asm/tlbflush.h
diff --git a/include/asm-h8300/topology.h b/arch/h8300/include/asm/topology.h
index fdc121924d4c..fdc121924d4c 100644
--- a/include/asm-h8300/topology.h
+++ b/arch/h8300/include/asm/topology.h
diff --git a/include/asm-h8300/traps.h b/arch/h8300/include/asm/traps.h
index 41cf6be02f68..41cf6be02f68 100644
--- a/include/asm-h8300/traps.h
+++ b/arch/h8300/include/asm/traps.h
diff --git a/include/asm-h8300/types.h b/arch/h8300/include/asm/types.h
index 12875190b156..12875190b156 100644
--- a/include/asm-h8300/types.h
+++ b/arch/h8300/include/asm/types.h
diff --git a/include/asm-h8300/uaccess.h b/arch/h8300/include/asm/uaccess.h
index 356068cd0879..356068cd0879 100644
--- a/include/asm-h8300/uaccess.h
+++ b/arch/h8300/include/asm/uaccess.h
diff --git a/include/asm-h8300/ucontext.h b/arch/h8300/include/asm/ucontext.h
index 0bcf8f85fab9..0bcf8f85fab9 100644
--- a/include/asm-h8300/ucontext.h
+++ b/arch/h8300/include/asm/ucontext.h
diff --git a/include/asm-h8300/unaligned.h b/arch/h8300/include/asm/unaligned.h
index b8d06c70c2da..b8d06c70c2da 100644
--- a/include/asm-h8300/unaligned.h
+++ b/arch/h8300/include/asm/unaligned.h
diff --git a/include/asm-h8300/unistd.h b/arch/h8300/include/asm/unistd.h
index 99f3c3561ecb..99f3c3561ecb 100644
--- a/include/asm-h8300/unistd.h
+++ b/arch/h8300/include/asm/unistd.h
diff --git a/include/asm-h8300/user.h b/arch/h8300/include/asm/user.h
index 14a9e18950f1..14a9e18950f1 100644
--- a/include/asm-h8300/user.h
+++ b/arch/h8300/include/asm/user.h
diff --git a/include/asm-h8300/virtconvert.h b/arch/h8300/include/asm/virtconvert.h
index 19cfd62b11c3..19cfd62b11c3 100644
--- a/include/asm-h8300/virtconvert.h
+++ b/arch/h8300/include/asm/virtconvert.h
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index a1d228f5e4e6..9942f24aff9e 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -40,9 +40,6 @@
#undef DEBUG
-extern void die_if_kernel(char *,struct pt_regs *,long);
-extern void free_initmem(void);
-
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
@@ -73,7 +70,7 @@ extern unsigned long memory_end;
* The parameters are pointers to where to stick the starting and ending
* addresses of available kernel virtual memory.
*/
-void paging_init(void)
+void __init paging_init(void)
{
/*
* Make sure start_mem is page aligned, otherwise bootmem and
@@ -122,7 +119,7 @@ void paging_init(void)
}
}
-void mem_init(void)
+void __init mem_init(void)
{
int codek = 0, datak = 0, initk = 0;
/* DAVIDM look at setup memory map generically with reserved area */
@@ -178,7 +175,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
#endif
void
-free_initmem()
+free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
unsigned long addr;
diff --git a/arch/ia64/include/asm/kexec.h b/arch/ia64/include/asm/kexec.h
index 541be835fc5a..e1d58f819d78 100644
--- a/arch/ia64/include/asm/kexec.h
+++ b/arch/ia64/include/asm/kexec.h
@@ -9,7 +9,7 @@
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
-#define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096)
+#define KEXEC_CONTROL_PAGE_SIZE (8192 + 8192 + 4096)
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_IA_64
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
index 89594b442f83..ea310c0812aa 100644
--- a/arch/ia64/include/asm/sal.h
+++ b/arch/ia64/include/asm/sal.h
@@ -236,7 +236,7 @@ extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
extern unsigned short sal_revision; /* supported SAL spec revision */
extern unsigned short sal_version; /* SAL version; OEM dependent */
-#define SAL_VERSION_CODE(major, minor) ((BIN2BCD(major) << 8) | BIN2BCD(minor))
+#define SAL_VERSION_CODE(major, minor) ((bin2bcd(major) << 8) | bin2bcd(minor))
extern const char *ia64_sal_strerror (long status);
extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 41c712917ff7..8bdea8eb62e3 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -359,7 +359,31 @@ start_ap:
mov ar.rsc=0 // place RSE in enforced lazy mode
;;
loadrs // clear the dirty partition
- mov IA64_KR(PER_CPU_DATA)=r0 // clear physical per-CPU base
+ movl r19=__phys_per_cpu_start
+ mov r18=PERCPU_PAGE_SIZE
+ ;;
+#ifndef CONFIG_SMP
+ add r19=r19,r18
+ ;;
+#else
+(isAP) br.few 2f
+ mov r20=r19
+ sub r19=r19,r18
+ ;;
+ shr.u r18=r18,3
+1:
+ ld8 r21=[r20],8;;
+ st8[r19]=r21,8
+ adds r18=-1,r18;;
+ cmp4.lt p7,p6=0,r18
+(p7) br.cond.dptk.few 1b
+2:
+#endif
+ tpa r19=r19
+ ;;
+ .pred.rel.mutex isBP,isAP
+(isBP) mov IA64_KR(PER_CPU_DATA)=r19 // per-CPU base for cpu0
+(isAP) mov IA64_KR(PER_CPU_DATA)=r0 // clear physical per-CPU base
;;
mov ar.bspstore=r2 // establish the new RSE stack
;;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 593279f33e96..c27d5b2c182b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -927,17 +927,19 @@ cpu_init (void)
if (smp_processor_id() == 0) {
cpu_set(0, per_cpu(cpu_sibling_map, 0));
cpu_set(0, cpu_core_map[0]);
+ } else {
+ /*
+ * Set ar.k3 so that assembly code in MCA handler can compute
+ * physical addresses of per cpu variables with a simple:
+ * phys = ar.k3 + &per_cpu_var
+ * and the alt-dtlb-miss handler can set per-cpu mapping into
+ * the TLB when needed. head.S already did this for cpu0.
+ */
+ ia64_set_kr(IA64_KR_PER_CPU_DATA,
+ ia64_tpa(cpu_data) - (long) __per_cpu_start);
}
#endif
- /*
- * We set ar.k3 so that assembly code in MCA handler can compute
- * physical addresses of per cpu variables with a simple:
- * phys = ar.k3 + &per_cpu_var
- */
- ia64_set_kr(IA64_KR_PER_CPU_DATA,
- ia64_tpa(cpu_data) - (long) __per_cpu_start);
-
get_max_cacheline_size();
/*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 03f1a9908afc..b39853a292d5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -467,7 +467,9 @@ start_secondary (void *unused)
{
/* Early console may use I/O ports */
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
+#ifndef CONFIG_PRINTK_TIME
Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
+#endif
efi_map_pal_code();
cpu_init();
preempt_disable();
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a77206c2492..de71da811cd6 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -215,6 +215,9 @@ SECTIONS
/* Per-cpu data: */
percpu : { } :percpu
. = ALIGN(PERCPU_PAGE_SIZE);
+#ifdef CONFIG_SMP
+ . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
+#endif
__phys_per_cpu_start = .;
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 798bf9835a51..e566ff43884a 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,8 +163,14 @@ per_cpu_init (void)
* get_zeroed_page().
*/
if (first_time) {
+ void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+
first_time=0;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
+
+ __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
+ per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+
+ for (cpu = 1; cpu < NR_CPUS; cpu++) {
memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
cpu_data += PERCPU_PAGE_SIZE;
@@ -177,7 +183,7 @@ per_cpu_init (void)
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+ cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
#else
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d83125e1ed27..78026aabaa7f 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,7 +143,11 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
int cpu;
for_each_possible_early_cpu(cpu) {
- if (node == node_cpuid[cpu].nid) {
+ if (cpu == 0) {
+ void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+ __per_cpu_offset[cpu] = (char*)cpu0_data -
+ __per_cpu_start;
+ } else if (node == node_cpuid[cpu].nid) {
memcpy(__va(cpu_data), __phys_per_cpu_start,
__per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 63c9cafda9c4..587da5e0990f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -42,9 +42,6 @@ config GENERIC_HARDIRQS
bool
default y
-config HAVE_GET_USER_PAGES_FAST
- def_bool PPC64
-
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index fbe2932fa9e9..6251a4b10be7 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -291,6 +291,28 @@ struct hvcall_mpp_data {
};
int h_get_mpp(struct hvcall_mpp_data *);
+
+#ifdef CONFIG_PPC_PSERIES
+extern int CMO_PrPSP;
+extern int CMO_SecPSP;
+extern unsigned long CMO_PageSize;
+
+static inline int cmo_get_primary_psp(void)
+{
+ return CMO_PrPSP;
+}
+
+static inline int cmo_get_secondary_psp(void)
+{
+ return CMO_SecPSP;
+}
+
+static inline unsigned long cmo_get_page_size(void)
+{
+ return CMO_PageSize;
+}
+#endif /* CONFIG_PPC_PSERIES */
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index 17ba3a881bfd..5f68ecfdf516 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -20,7 +20,7 @@
#define TRACE_ENABLE_INTS bl .trace_hardirqs_on
#define TRACE_DISABLE_INTS bl .trace_hardirqs_off
#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
- cmpdi en, 0; \
+ cmpdi en,0; \
bne 95f; \
stb en,PACASOFTIRQEN(r13); \
bl .trace_hardirqs_off; \
@@ -29,7 +29,8 @@
li en,1;
#define TRACE_AND_RESTORE_IRQ(en) \
TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \
-96: stb en,PACASOFTIRQEN(r13)
+ stb en,PACASOFTIRQEN(r13); \
+96:
#else
#define TRACE_ENABLE_INTS
#define TRACE_DISABLE_INTS
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index acdcdc66f1b6..3736d9b33289 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -22,7 +22,7 @@
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
#endif
-#define KEXEC_CONTROL_CODE_SIZE 4096
+#define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
#ifdef __powerpc64__
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 9102b8bf0ead..6b993ef452ff 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -147,7 +147,6 @@ static inline void get_mmu_context(struct mm_struct *mm)
static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
mm->context.id = NO_CONTEXT;
- mm->context.vdso_base = 0;
return 0;
}
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index e084272ed1c2..f6cc7a43b4fa 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -92,7 +92,7 @@ COMPAT_SYS_SPU(readlink)
SYSCALL(uselib)
SYSCALL(swapon)
SYSCALL(reboot)
-SYSX(sys_ni_syscall,old32_readdir,old_readdir)
+SYSX(sys_ni_syscall,compat_sys_old_readdir,old_readdir)
SYSCALL_SPU(mmap)
SYSCALL_SPU(munmap)
SYSCALL_SPU(truncate)
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 99ee2f0f0f2b..8bb657519299 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -1155,7 +1155,7 @@ flush_tlbs:
lis r10, 0x40
1: addic. r10, r10, -0x1000
tlbie r10
- blt 1b
+ bgt 1b
sync
blr
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1a09719c7628..b3eef30b5131 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -416,6 +416,8 @@ static void pseries_cmo_data(struct seq_file *m)
unsigned long cmo_faults = 0;
unsigned long cmo_fault_time = 0;
+ seq_printf(m, "cmo_enabled=%d\n", firmware_has_feature(FW_FEATURE_CMO));
+
if (!firmware_has_feature(FW_FEATURE_CMO))
return;
@@ -427,6 +429,9 @@ static void pseries_cmo_data(struct seq_file *m)
seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
seq_printf(m, "cmo_fault_time_usec=%lu\n",
cmo_fault_time / tb_ticks_per_usec);
+ seq_printf(m, "cmo_primary_psp=%d\n", cmo_get_primary_psp());
+ seq_printf(m, "cmo_secondary_psp=%d\n", cmo_get_secondary_psp());
+ seq_printf(m, "cmo_page_size=%lu\n", cmo_get_page_size());
}
static int pseries_lparcfg_data(struct seq_file *m, void *v)
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
index cbaa34196797..ae63a964b858 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -51,7 +51,7 @@ void default_machine_kexec(struct kimage *image)
relocate_new_kernel_size);
flush_icache_range(reboot_code_buffer,
- reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
+ reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
/* now call it */
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 6321ae36f729..7a6dfbca7682 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -899,7 +899,7 @@ relocate_new_kernel:
/* set a new stack at the bottom of our page... */
/* (not really needed now) */
- addi r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */
+ addi r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
stw r0, 0(r1)
/* Do the copies */
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index af07003573c4..7ff292475269 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -99,18 +99,3 @@ void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}
-
-struct bug_entry *module_find_bug(unsigned long bugaddr)
-{
- struct mod_arch_specific *mod;
- unsigned int i;
- struct bug_entry *bug;
-
- list_for_each_entry(mod, &module_bug_list, bug_list) {
- bug = mod->bug_table;
- for (i = 0; i < mod->num_bugs; ++i, ++bug)
- if (bugaddr == bug->bug_addr)
- return bug;
- }
- return NULL;
-}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 709f8cb8bfca..d98634c76060 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -52,63 +52,6 @@
#include <asm/ppc-pci.h>
#include <asm/syscalls.h>
-struct old_linux_dirent32 {
- u32 d_ino;
- u32 d_offset;
- unsigned short d_namlen;
- char d_name[1];
-};
-
-struct readdir_callback32 {
- struct old_linux_dirent32 __user * dirent;
- int count;
-};
-
-static int fillonedir(void * __buf, const char * name, int namlen,
- off_t offset, u64 ino, unsigned int d_type)
-{
- struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf;
- struct old_linux_dirent32 __user * dirent;
- ino_t d_ino;
-
- if (buf->count)
- return -EINVAL;
- d_ino = ino;
- if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
- return -EOVERFLOW;
- buf->count++;
- dirent = buf->dirent;
- put_user(d_ino, &dirent->d_ino);
- put_user(offset, &dirent->d_offset);
- put_user(namlen, &dirent->d_namlen);
- copy_to_user(dirent->d_name, name, namlen);
- put_user(0, dirent->d_name + namlen);
- return 0;
-}
-
-asmlinkage int old32_readdir(unsigned int fd, struct old_linux_dirent32 __user *dirent, unsigned int count)
-{
- int error = -EBADF;
- struct file * file;
- struct readdir_callback32 buf;
-
- file = fget(fd);
- if (!file)
- goto out;
-
- buf.count = 0;
- buf.dirent = dirent;
-
- error = vfs_readdir(file, (filldir_t)fillonedir, &buf);
- if (error < 0)
- goto out_putf;
- error = buf.count;
-
-out_putf:
- fput(file);
-out:
- return error;
-}
asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a437267c6bf8..d967c1893ab5 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -2,6 +2,7 @@
#define _PSERIES_PLPAR_WRAPPERS_H
#include <asm/hvcall.h>
+#include <asm/page.h>
static inline long poll_pending(void)
{
@@ -44,12 +45,34 @@ static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
static inline long plpar_page_set_loaned(unsigned long vpa)
{
- return plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa, 0);
+ unsigned long cmo_page_sz = cmo_get_page_size();
+ long rc = 0;
+ int i;
+
+ for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+ rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
+
+ for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+ plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
+ vpa + i - cmo_page_sz, 0);
+
+ return rc;
}
static inline long plpar_page_set_active(unsigned long vpa)
{
- return plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa, 0);
+ unsigned long cmo_page_sz = cmo_get_page_size();
+ long rc = 0;
+ int i;
+
+ for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+ rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
+
+ for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+ plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
+ vpa + i - cmo_page_sz, 0);
+
+ return rc;
}
extern void vpa_init(int cpu);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 063a0d2fba30..3ce8a139b85d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -68,6 +68,9 @@
#include "plpar_wrappers.h"
#include "pseries.h"
+int CMO_PrPSP = -1;
+int CMO_SecPSP = -1;
+unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -325,8 +328,7 @@ void pSeries_cmo_feature_init(void)
{
char *ptr, *key, *value, *end;
int call_status;
- int PrPSP = -1;
- int SecPSP = -1;
+ int page_order = IOMMU_PAGE_SHIFT;
pr_debug(" -> fw_cmo_feature_init()\n");
spin_lock(&rtas_data_buf_lock);
@@ -365,21 +367,31 @@ void pSeries_cmo_feature_init(void)
break;
}
- if (0 == strcmp(key, "PrPSP"))
- PrPSP = simple_strtol(value, NULL, 10);
+ if (0 == strcmp(key, "CMOPageSize"))
+ page_order = simple_strtol(value, NULL, 10);
+ else if (0 == strcmp(key, "PrPSP"))
+ CMO_PrPSP = simple_strtol(value, NULL, 10);
else if (0 == strcmp(key, "SecPSP"))
- SecPSP = simple_strtol(value, NULL, 10);
+ CMO_SecPSP = simple_strtol(value, NULL, 10);
value = key = ptr + 1;
}
ptr++;
}
- if (PrPSP != -1 || SecPSP != -1) {
+ /* The page size is returned as a power of 2;
+ * convert it to the page size in bytes before returning
+ */
+ CMO_PageSize = 1 << page_order;
+ pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);
+
+ if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
pr_info("CMO enabled\n");
- pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", PrPSP, SecPSP);
+ pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
+ CMO_SecPSP);
powerpc_firmware_features |= FW_FEATURE_CMO;
} else
- pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", PrPSP, SecPSP);
+ pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
+ CMO_SecPSP);
spin_unlock(&rtas_data_buf_lock);
pr_debug(" <- fw_cmo_feature_init()\n");
}
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index f219c6411e0b..bb729b84a21e 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -31,7 +31,7 @@
#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
/* Allocate one page for the pdp and the second for the code */
-#define KEXEC_CONTROL_CODE_SIZE 4096
+#define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_S390
diff --git a/arch/sh/include/asm/kexec.h b/arch/sh/include/asm/kexec.h
index 00f4260ef09b..765a5e1660fc 100644
--- a/arch/sh/include/asm/kexec.h
+++ b/arch/sh/include/asm/kexec.h
@@ -21,7 +21,7 @@
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
-#define KEXEC_CONTROL_CODE_SIZE 4096
+#define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_SH
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3473e25231d9..e3dd9303643d 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -93,4 +93,8 @@ static inline unsigned long get_softint(void)
void __trigger_all_cpu_backtrace(void);
#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+extern void *hardirq_stack[NR_CPUS];
+extern void *softirq_stack[NR_CPUS];
+#define __ARCH_HAS_DO_SOFTIRQ
+
#endif
diff --git a/arch/sparc/include/asm/of_device.h b/arch/sparc/include/asm/of_device.h
index e5f5aedc2293..bba777a416d3 100644
--- a/arch/sparc/include/asm/of_device.h
+++ b/arch/sparc/include/asm/of_device.h
@@ -30,8 +30,7 @@ struct of_device
extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
-/* These are just here during the transition */
-#include <linux/of_device.h>
+/* This is just here during the transition */
#include <linux/of_platform.h>
#endif /* __KERNEL__ */
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ba43d85e8dde..9b6689d9d570 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -682,10 +682,32 @@ void ack_bad_irq(unsigned int virt_irq)
ino, virt_irq);
}
+void *hardirq_stack[NR_CPUS];
+void *softirq_stack[NR_CPUS];
+
+static __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+ void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+ __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+ if (orig_sp < sp ||
+ orig_sp > (sp + THREAD_SIZE)) {
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+ }
+
+ return orig_sp;
+}
+static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
void handler_irq(int irq, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
+ void *orig_sp;
clear_softint(1 << irq);
@@ -703,6 +725,8 @@ void handler_irq(int irq, struct pt_regs *regs)
"i" (PSTATE_IE)
: "memory");
+ orig_sp = set_hardirq_stack();
+
while (bucket_pa) {
struct irq_desc *desc;
unsigned long next_pa;
@@ -719,10 +743,38 @@ void handler_irq(int irq, struct pt_regs *regs)
bucket_pa = next_pa;
}
+ restore_hardirq_stack(orig_sp);
+
irq_exit();
set_irq_regs(old_regs);
}
+void do_softirq(void)
+{
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
+
+ __asm__ __volatile__("mov %%sp, %0\n\t"
+ "mov %1, %%sp"
+ : "=&r" (orig_sp)
+ : "r" (sp));
+ __do_softirq();
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
+ }
+
+ local_irq_restore(flags);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
diff --git a/arch/sparc64/kernel/kstack.h b/arch/sparc64/kernel/kstack.h
new file mode 100644
index 000000000000..4248d969272f
--- /dev/null
+++ b/arch/sparc64/kernel/kstack.h
@@ -0,0 +1,60 @@
+#ifndef _KSTACK_H
+#define _KSTACK_H
+
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+
+/* SP must be STACK_BIAS adjusted already. */
+static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
+{
+ unsigned long base = (unsigned long) tp;
+
+ if (sp >= (base + sizeof(struct thread_info)) &&
+ sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+ return true;
+
+ if (hardirq_stack[tp->cpu]) {
+ base = (unsigned long) hardirq_stack[tp->cpu];
+ if (sp >= base &&
+ sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+ return true;
+ base = (unsigned long) softirq_stack[tp->cpu];
+ if (sp >= base &&
+ sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+ return true;
+ }
+ return false;
+}
+
+/* Does "regs" point to a valid pt_regs trap frame? */
+static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
+{
+ unsigned long base = (unsigned long) tp;
+ unsigned long addr = (unsigned long) regs;
+
+ if (addr >= base &&
+ addr <= (base + THREAD_SIZE - sizeof(*regs)))
+ goto check_magic;
+
+ if (hardirq_stack[tp->cpu]) {
+ base = (unsigned long) hardirq_stack[tp->cpu];
+ if (addr >= base &&
+ addr <= (base + THREAD_SIZE - sizeof(*regs)))
+ goto check_magic;
+ base = (unsigned long) softirq_stack[tp->cpu];
+ if (addr >= base &&
+ addr <= (base + THREAD_SIZE - sizeof(*regs)))
+ goto check_magic;
+ }
+ return false;
+
+check_magic:
+ if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
+ return true;
+ return false;
+
+}
+
+#endif /* _KSTACK_H */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 7f5debdc5fed..15f4178592e7 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -52,6 +52,8 @@
#include <asm/irq_regs.h>
#include <asm/smp.h>
+#include "kstack.h"
+
static void sparc64_yield(int cpu)
{
if (tlb_type != hypervisor)
@@ -235,19 +237,6 @@ void show_regs(struct pt_regs *regs)
struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_reg_snapshot_lock);
-static bool kstack_valid(struct thread_info *tp, struct reg_window *rw)
-{
- unsigned long thread_base, fp;
-
- thread_base = (unsigned long) tp;
- fp = (unsigned long) rw;
-
- if (fp < (thread_base + sizeof(struct thread_info)) ||
- fp >= (thread_base + THREAD_SIZE))
- return false;
- return true;
-}
-
static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
int this_cpu)
{
@@ -264,11 +253,11 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
- if (kstack_valid(tp, rw)) {
+ if (kstack_valid(tp, (unsigned long) rw)) {
global_reg_snapshot[this_cpu].i7 = rw->ins[7];
rw = (struct reg_window *)
(rw->ins[6] + STACK_BIAS);
- if (kstack_valid(tp, rw))
+ if (kstack_valid(tp, (unsigned long) rw))
global_reg_snapshot[this_cpu].rpc = rw->ins[7];
}
} else {
@@ -828,7 +817,7 @@ out:
unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc, fp, bias = 0;
- unsigned long thread_info_base;
+ struct thread_info *tp;
struct reg_window *rw;
unsigned long ret = 0;
int count = 0;
@@ -837,14 +826,12 @@ unsigned long get_wchan(struct task_struct *task)
task->state == TASK_RUNNING)
goto out;
- thread_info_base = (unsigned long) task_stack_page(task);
+ tp = task_thread_info(task);
bias = STACK_BIAS;
fp = task_thread_info(task)->ksp + bias;
do {
- /* Bogus frame pointer? */
- if (fp < (thread_info_base + sizeof(struct thread_info)) ||
- fp >= (thread_info_base + THREAD_SIZE))
+ if (!kstack_valid(tp, fp))
break;
rw = (struct reg_window *) fp;
pc = rw->ins[7];
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 27b81775a4de..743ccad61c60 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -858,9 +858,7 @@ void smp_tsb_sync(struct mm_struct *mm)
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
-#ifdef CONFIG_MAGIC_SYSRQ
extern unsigned long xcall_fetch_glob_regs;
-#endif
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
@@ -1005,12 +1003,10 @@ void kgdb_roundup_cpus(unsigned long flags)
}
#endif
-#ifdef CONFIG_MAGIC_SYSRQ
void smp_fetch_global_regs(void)
{
smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}
-#endif
/* We know that the window frames of the user have been flushed
* to the stack before we get here because all callers of us
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
index e9d7f0660f2e..4e21d4a57d3b 100644
--- a/arch/sparc64/kernel/stacktrace.c
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -5,10 +5,12 @@
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
+#include "kstack.h"
+
void save_stack_trace(struct stack_trace *trace)
{
- unsigned long ksp, fp, thread_base;
struct thread_info *tp = task_thread_info(current);
+ unsigned long ksp, fp;
stack_trace_flush();
@@ -18,23 +20,18 @@ void save_stack_trace(struct stack_trace *trace)
);
fp = ksp + STACK_BIAS;
- thread_base = (unsigned long) tp;
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
unsigned long pc;
- /* Bogus frame pointer? */
- if (fp < (thread_base + sizeof(struct thread_info)) ||
- fp > (thread_base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+ if (!kstack_valid(tp, fp))
break;
sf = (struct sparc_stackf *) fp;
regs = (struct pt_regs *) (sf + 1);
- if (((unsigned long)regs <=
- (thread_base + THREAD_SIZE - sizeof(*regs))) &&
- (regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
+ if (kstack_is_trap_frame(tp, regs)) {
if (!(regs->tstate & TSTATE_PRIV))
break;
pc = regs->tpc;
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 404e8561e2d0..3d924121c796 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -39,6 +39,7 @@
#include <asm/prom.h>
#include "entry.h"
+#include "kstack.h"
/* When an irrecoverable trap occurs at tl > 0, the trap entry
* code logs the trap state registers at every level in the trap
@@ -2115,14 +2116,12 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
struct pt_regs *regs;
unsigned long pc;
- /* Bogus frame pointer? */
- if (fp < (thread_base + sizeof(struct thread_info)) ||
- fp >= (thread_base + THREAD_SIZE))
+ if (!kstack_valid(tp, fp))
break;
sf = (struct sparc_stackf *) fp;
regs = (struct pt_regs *) (sf + 1);
- if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
+ if (kstack_is_trap_frame(tp, regs)) {
if (!(regs->tstate & TSTATE_PRIV))
break;
pc = regs->tpc;
diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
index 7735a7a60533..fad90ddb3a28 100644
--- a/arch/sparc64/lib/mcount.S
+++ b/arch/sparc64/lib/mcount.S
@@ -48,12 +48,45 @@ mcount:
sub %g3, STACK_BIAS, %g3
cmp %sp, %g3
bg,pt %xcc, 1f
- sethi %hi(panicstring), %g3
+ nop
+ lduh [%g6 + TI_CPU], %g1
+ sethi %hi(hardirq_stack), %g3
+ or %g3, %lo(hardirq_stack), %g3
+ sllx %g1, 3, %g1
+ ldx [%g3 + %g1], %g7
+ sub %g7, STACK_BIAS, %g7
+ cmp %sp, %g7
+ bleu,pt %xcc, 2f
+ sethi %hi(THREAD_SIZE), %g3
+ add %g7, %g3, %g7
+ cmp %sp, %g7
+ blu,pn %xcc, 1f
+2: sethi %hi(softirq_stack), %g3
+ or %g3, %lo(softirq_stack), %g3
+ ldx [%g3 + %g1], %g7
+ cmp %sp, %g7
+ bleu,pt %xcc, 2f
+ sethi %hi(THREAD_SIZE), %g3
+ add %g7, %g3, %g7
+ cmp %sp, %g7
+ blu,pn %xcc, 1f
+ nop
+ /* If we are already on ovstack, don't hop onto it
+ * again; we are already trying to output the stack overflow
+ * message.
+ */
sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
or %g7, %lo(ovstack), %g7
- add %g7, OVSTACKSIZE, %g7
+ add %g7, OVSTACKSIZE, %g3
+ sub %g3, STACK_BIAS + 192, %g3
sub %g7, STACK_BIAS, %g7
- mov %g7, %sp
+ cmp %sp, %g7
+ blu,pn %xcc, 2f
+ cmp %sp, %g3
+ bleu,pn %xcc, 1f
+ nop
+2: mov %g3, %sp
+ sethi %hi(panicstring), %g3
call prom_printf
or %g3, %lo(panicstring), %o0
call prom_halt
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4e821b3ecb03..b4aeb0f696dc 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -49,6 +49,7 @@
#include <asm/sstate.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
+#include <asm/irq.h>
#define MAX_PHYS_ADDRESS (1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -795,6 +796,9 @@ static unsigned long nid_range(unsigned long start, unsigned long end,
start += PAGE_SIZE;
}
+ if (start > end)
+ start = end;
+
return start;
}
#else
@@ -1722,8 +1726,7 @@ void __init paging_init(void)
find_ramdisk(phys_base);
- if (cmdline_memory_size)
- lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
+ lmb_enforce_memory_limit(cmdline_memory_size);
lmb_analyze();
lmb_dump_all();
@@ -1771,6 +1774,16 @@ void __init paging_init(void)
if (tlb_type == hypervisor)
sun4v_mdesc_init();
+ /* Once the OF device tree and MDESC have been setup, we know
+ * the list of possible cpus. Therefore we can allocate the
+ * IRQ stacks.
+ */
+ for_each_possible_cpu(i) {
+ /* XXX Use node local allocations... XXX */
+ softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ }
+
/* Setup bootmem... */
last_valid_pfn = end_pfn = bootmem_init(phys_base);
@@ -1950,6 +1963,15 @@ void __init mem_init(void)
void free_initmem(void)
{
unsigned long addr, initend;
+ int do_free = 1;
+
+ /* If the physical memory maps were trimmed by kernel command
+ * line options, don't even try freeing this initmem stuff up.
+ * The kernel image could have been in the trimmed-out region
+ * and, if so, the freeing below will free invalid page structs.
+ */
+ if (cmdline_memory_size)
+ do_free = 0;
/*
* The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
@@ -1964,13 +1986,16 @@ void free_initmem(void)
((unsigned long) __va(kern_base)) -
((unsigned long) KERNBASE));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- p = virt_to_page(page);
- ClearPageReserved(p);
- init_page_count(p);
- __free_page(p);
- num_physpages++;
- totalram_pages++;
+ if (do_free) {
+ p = virt_to_page(page);
+
+ ClearPageReserved(p);
+ init_page_count(p);
+ __free_page(p);
+ num_physpages++;
+ totalram_pages++;
+ }
}
}
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index ff1dc44d363e..86773e89dc1b 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -480,7 +480,6 @@ xcall_sync_tick:
b rtrap_xcall
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-#ifdef CONFIG_MAGIC_SYSRQ
.globl xcall_fetch_glob_regs
xcall_fetch_glob_regs:
sethi %hi(global_reg_snapshot), %g1
@@ -511,7 +510,6 @@ xcall_fetch_glob_regs:
membar #StoreStore
stx %g3, [%g1 + GR_SNAP_THREAD]
retry
-#endif /* CONFIG_MAGIC_SYSRQ */
#ifdef DCACHE_ALIASING_POSSIBLE
.align 32
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3d0f2b6a5a16..68d91c8233f4 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,7 +22,6 @@ config X86
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_IOREMAP_PROT
- select HAVE_GET_USER_PAGES_FAST
select HAVE_KPROBES
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_KRETPROBES
@@ -952,9 +951,9 @@ config NUMA
local memory controller of the CPU and add some more
NUMA awareness to the kernel.
- For i386 this is currently highly experimental and should be only
+ For 32-bit this is currently highly experimental and should be only
used for kernel development. It might also cause boot failures.
- For x86_64 this is recommended on all multiprocessor Opteron systems.
+ For 64-bit this is recommended on all multiprocessor Opteron systems.
If the system is EM64T, you should say N unless your system is
EM64T NUMA.
@@ -1264,7 +1263,7 @@ config KEXEC
strongly in flux, so no good recommendation can be made.
config CRASH_DUMP
- bool "kernel crash dumps (EXPERIMENTAL)"
+ bool "kernel crash dumps"
depends on X86_64 || (X86_32 && HIGHMEM)
help
Generate crash dump after being started by kexec.
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index a34b9982c7cb..cc0ef13fba7a 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -24,10 +24,14 @@
#include <linux/edd.h>
#include <asm/boot.h>
#include <asm/setup.h>
+#include "bitops.h"
+#include <asm/cpufeature.h>
/* Useful macros */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
extern struct setup_header hdr;
extern struct boot_params boot_params;
@@ -242,6 +246,12 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize);
int cmdline_find_option_bool(const char *option);
/* cpu.c, cpucheck.c */
+struct cpu_features {
+ int level; /* Family, or 64 for x86-64 */
+ int model;
+ u32 flags[NCAPINTS];
+};
+extern struct cpu_features cpu;
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
int validate_cpu(void);
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 92d6fd73dc7d..75298fe2edca 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -16,9 +16,6 @@
*/
#include "boot.h"
-#include "bitops.h"
-#include <asm/cpufeature.h>
-
#include "cpustr.h"
static char *cpu_name(int level)
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 7804389ee005..4b9ae7c56748 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -22,21 +22,13 @@
#ifdef _SETUP
# include "boot.h"
-# include "bitops.h"
#endif
#include <linux/types.h>
-#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
-struct cpu_features {
- int level; /* Family, or 64 for x86-64 */
- int model;
- u32 flags[NCAPINTS];
-};
-
-static struct cpu_features cpu;
+struct cpu_features cpu;
static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index 2296164b54d2..197421db1af1 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -73,6 +73,11 @@ static void keyboard_set_repeat(void)
*/
static void query_ist(void)
{
+ /* Some older BIOSes apparently crash on this call, so filter
+ it from machines too old to have SpeedStep at all. */
+ if (cpu.level < 6)
+ return;
+
asm("int $0x15"
: "=a" (boot_params.ist_info.signature),
"=b" (boot_params.ist_info.command),
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 53165c97336b..8c3c25f35578 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -13,7 +13,6 @@
*/
#include "boot.h"
-#include <linux/kernel.h>
#define SMAP 0x534d4150 /* ASCII "SMAP" */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index fa88a1d71290..bfd10fd211cd 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -97,6 +97,8 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif
+static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
+
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
@@ -158,6 +160,14 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;
+static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
+{
+ if (!strcmp(mcfg->header.oem_id, "SGI"))
+ acpi_mcfg_64bit_base_addr = TRUE;
+
+ return 0;
+}
+
int __init acpi_parse_mcfg(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
@@ -190,8 +200,12 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header)
}
memcpy(pci_mmcfg_config, &mcfg[1], config_size);
+
+ acpi_mcfg_oem_check(mcfg);
+
for (i = 0; i < pci_mmcfg_config_num; ++i) {
- if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
+ if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
+ !acpi_mcfg_64bit_base_addr) {
printk(KERN_ERR PREFIX
"MMCONFIG not in low 4GB of memory\n");
kfree(pci_mmcfg_config);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index fa2161d5003b..426e5d91b63a 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -20,7 +20,7 @@ unsigned long acpi_realmode_flags;
/* address in low memory of the wakeup routine. */
static unsigned long acpi_realmode;
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
static char temp_stack[10240];
#endif
@@ -86,7 +86,7 @@ int acpi_save_state_mem(void)
#endif /* !CONFIG_64BIT */
header->pmode_cr0 = read_cr0();
- header->pmode_cr4 = read_cr4();
+ header->pmode_cr4 = read_cr4_safe();
header->realmode_flags = acpi_realmode_flags;
header->real_magic = 0x12345678;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 22d7d050905d..69b4d060b21c 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -65,7 +65,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
u8 *target;
tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
- target = (iommu->cmd_buf + tail);
+ target = iommu->cmd_buf + tail;
memcpy_toio(target, cmd, sizeof(*cmd));
tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
@@ -101,16 +101,13 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
*/
static int iommu_completion_wait(struct amd_iommu *iommu)
{
- int ret;
+ int ret, ready = 0;
+ unsigned status = 0;
struct iommu_cmd cmd;
- volatile u64 ready = 0;
- unsigned long ready_phys = virt_to_phys(&ready);
unsigned long i = 0;
memset(&cmd, 0, sizeof(cmd));
- cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
- cmd.data[1] = upper_32_bits(ready_phys);
- cmd.data[2] = 1; /* value written to 'ready' */
+ cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
iommu->need_sync = 0;
@@ -122,9 +119,15 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
while (!ready && (i < EXIT_LOOP_COUNT)) {
++i;
- cpu_relax();
+ /* wait for the bit to become one */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
}
+ /* set bit back to zero */
+ status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
+ writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
@@ -161,7 +164,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
address &= PAGE_MASK;
CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
cmd.data[1] |= domid;
- cmd.data[2] = LOW_U32(address);
+ cmd.data[2] = lower_32_bits(address);
cmd.data[3] = upper_32_bits(address);
if (s) /* size bit - we flush more than one 4kb page */
cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index d9a9da597e79..a69cc0f52042 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -801,6 +801,21 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
}
/*
+ * Init the device table to not allow DMA access for devices and
+ * suppress all page faults
+ */
+static void init_device_table(void)
+{
+ u16 devid;
+
+ for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+ set_dev_entry_bit(devid, DEV_ENTRY_VALID);
+ set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+ set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
+ }
+}
+
+/*
* This function finally enables all IOMMUs found in the system after
* they have been initialized
*/
@@ -931,6 +946,9 @@ int __init amd_iommu_init(void)
if (amd_iommu_pd_alloc_bitmap == NULL)
goto free;
+ /* init the device table */
+ init_device_table();
+
/*
* let all alias entries point to itself
*/
@@ -954,15 +972,15 @@ int __init amd_iommu_init(void)
if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
goto free;
- ret = amd_iommu_init_dma_ops();
+ ret = sysdev_class_register(&amd_iommu_sysdev_class);
if (ret)
goto free;
- ret = sysdev_class_register(&amd_iommu_sysdev_class);
+ ret = sysdev_register(&device_amd_iommu);
if (ret)
goto free;
- ret = sysdev_register(&device_amd_iommu);
+ ret = amd_iommu_init_dma_ops();
if (ret)
goto free;
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index d6c898358371..f88bd0d982b0 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1454,8 +1454,6 @@ void disconnect_bsp_APIC(int virt_wire_setup)
}
}
-unsigned int __cpuinitdata maxcpus = NR_CPUS;
-
void __cpuinit generic_processor_info(int apicid, int version)
{
int cpu;
@@ -1482,12 +1480,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
return;
}
- if (num_processors >= maxcpus) {
- printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
- " Processor ignored.\n", maxcpus);
- return;
- }
-
num_processors++;
cpus_complement(tmp_map, cpu_present_map);
cpu = first_cpu(tmp_map);
@@ -1720,15 +1712,19 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
-static int __init apic_set_verbosity(char *str)
+static int __init apic_set_verbosity(char *arg)
{
- if (strcmp("debug", str) == 0)
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "debug") == 0)
apic_verbosity = APIC_DEBUG;
- else if (strcmp("verbose", str) == 0)
+ else if (strcmp(arg, "verbose") == 0)
apic_verbosity = APIC_VERBOSE;
- return 1;
+
+ return 0;
}
-__setup("apic=", apic_set_verbosity);
+early_param("apic", apic_set_verbosity);
static int __init lapic_insert_resource(void)
{
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 7f1f030da7ee..446c062e831c 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -90,7 +90,6 @@ static unsigned long apic_phys;
unsigned long mp_lapic_addr;
-unsigned int __cpuinitdata maxcpus = NR_CPUS;
/*
* Get the LAPIC version
*/
@@ -1062,12 +1061,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
return;
}
- if (num_processors >= maxcpus) {
- printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
- " Processor ignored.\n", maxcpus);
- return;
- }
-
num_processors++;
cpus_complement(tmp_map, cpu_present_map);
cpu = first_cpu(tmp_map);
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 84a8220a6072..a6ef672adbba 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -56,9 +56,22 @@ void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
- if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+ /*
+ * There is a known erratum on Pentium III and Core Solo
+ * and Core Duo CPUs.
+ * " Page with PAT set to WC while associated MTRR is UC
+ * may consolidate to UC "
+ * Because of this erratum, it is better to stick with
+ * setting WC in MTRR rather than using PAT on these CPUs.
+ *
+ * Enable PAT WC only on P4, Core 2 or later CPUs.
+ */
+ if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
return;
- break;
+
+ pat_disable("PAT WC disabled due to known CPU erratum.");
+ return;
+
case X86_VENDOR_AMD:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_TRANSMETA:
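
The validate_pat_support() hunk above keeps PAT WC only on Intel family > 6, or family 6 with model >= 15 (Core 2 and later); anything older hits the WC/UC consolidation erratum and gets pat_disable(). A small userspace sketch of the same predicate; the helper name and the example family/model pairs are illustrative, not part of the patch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the test in validate_pat_support(): PAT WC stays enabled only
     * on P4 (family 0xF) or on family 6 with model >= 15 (Core 2 and later). */
    static bool intel_pat_wc_ok(int family, int model)
    {
            return family > 6 || (family == 6 && model >= 15);
    }

    int main(void)
    {
            /* Illustrative family/model pairs, not taken from the patch. */
            printf("Core Duo  (6,14):  %s\n", intel_pat_wc_ok(6, 14) ? "keep PAT WC" : "disable PAT");
            printf("Core 2    (6,15):  %s\n", intel_pat_wc_ok(6, 15) ? "keep PAT WC" : "disable PAT");
            printf("Pentium 4 (15, 2): %s\n", intel_pat_wc_ok(15, 2) ? "keep PAT WC" : "disable PAT");
            return 0;
    }
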
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c9b58a806e85..c8e315f1aa83 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -50,6 +50,8 @@ static double __initdata y = 3145727.0;
*/
static void __init check_fpu(void)
{
+ s32 fdiv_bug;
+
if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
@@ -74,8 +76,10 @@ static void __init check_fpu(void)
"fistpl %0\n\t"
"fwait\n\t"
"fninit"
- : "=m" (*&boot_cpu_data.fdiv_bug)
+ : "=m" (*&fdiv_bug)
: "m" (*&x), "m" (*&y));
+
+ boot_cpu_data.fdiv_bug = fdiv_bug;
if (boot_cpu_data.fdiv_bug)
printk("Hmm, FPU with FDIV bug.\n");
}
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 3fd7a67bb06a..e710a21bb6e8 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -134,23 +134,6 @@ static void __cpuinit set_cx86_memwb(void)
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
}
-static void __cpuinit set_cx86_inc(void)
-{
- unsigned char ccr3;
-
- printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
-
- ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- /* PCR1 -- Performance Control */
- /* Incrementor on, whatever that is */
- setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
- /* PCR0 -- Performance Control */
- /* Incrementor Margin 10 */
- setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
-}
-
/*
* Configure later MediaGX and/or Geode processor.
*/
@@ -174,7 +157,6 @@ static void __cpuinit geode_configure(void)
set_cx86_memwb();
set_cx86_reorder();
- set_cx86_inc();
local_irq_restore(flags);
}
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 509bd3d9eacd..43102e03e2d1 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -379,6 +379,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type *type)
{
unsigned int mask_lo, mask_hi, base_lo, base_hi;
+ unsigned int tmp, hi;
rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
if ((mask_lo & 0x800) == 0) {
@@ -392,8 +393,18 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
/* Work out the shifted address mask. */
- mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
- | mask_lo >> PAGE_SHIFT;
+ tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+ mask_lo = size_or_mask | tmp;
+ /* Expand tmp so that all bits above its highest set bit are 1s */
+ hi = fls(tmp);
+ if (hi > 0) {
+ tmp |= ~((1<<(hi - 1)) - 1);
+
+ if (tmp != mask_lo) {
+ WARN(1, "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
+ mask_lo = tmp;
+ }
+ }
/* This works correctly if size is a power of two, i.e. a
contiguous range. */
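
The generic_get_mtrr() change above widens a mask the BIOS may have programmed with too few bits: fls() finds the highest set bit of the shifted mask and every bit above it is forced to 1 before the warning and fix-up. A userspace sketch of that arithmetic, using __builtin_clz() as a stand-in for the kernel's fls() and an invented mask value:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's fls(): 1-based index of the
     * highest set bit, 0 for a zero value. */
    static int fls32(uint32_t x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    /* Force every bit above the highest set bit to 1, the same fix-up
     * generic_get_mtrr() now applies to a BIOS-programmed mask. */
    static uint32_t extend_mask_high(uint32_t tmp)
    {
            int hi = fls32(tmp);

            if (hi > 0)
                    tmp |= ~((1u << (hi - 1)) - 1);
            return tmp;
    }

    int main(void)
    {
            uint32_t bios_mask = 0x0ff00000;        /* invented example value */

            printf("bios mask  = %08x\n", (unsigned)bios_mask);
            printf("fixed mask = %08x\n", (unsigned)extend_mask_high(bios_mask));
            return 0;
    }
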
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 6f23969c8faf..b117d7f8a564 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1496,11 +1496,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
/* kvm/qemu doesn't have mtrr set right, don't trim them all */
if (!highest_pfn) {
- if (!kvm_para_available()) {
- printk(KERN_WARNING
+ WARN(!kvm_para_available(), KERN_WARNING
"WARNING: strange, CPU MTRRs all blank?\n");
- WARN_ON(1);
- }
return 0;
}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index de7439f82b92..05cc22dbd4ff 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -478,7 +478,13 @@ static int setup_p4_watchdog(unsigned nmi_hz)
perfctr_msr = MSR_P4_IQ_PERFCTR1;
evntsel_msr = MSR_P4_CRU_ESCR0;
cccr_msr = MSR_P4_IQ_CCCR1;
- cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
+
+ /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */
+ if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4)
+ cccr_val = P4_CCCR_OVF_PMI0;
+ else
+ cccr_val = P4_CCCR_OVF_PMI1;
+ cccr_val |= P4_CCCR_ESCR_SELECT(4);
}
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
index 4b63c8e1f13b..5cab48ee61a4 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/kernel/efi_32.c
@@ -53,7 +53,7 @@ void efi_call_phys_prelog(void)
* directory. If I have PAE, I just need to duplicate one entry in
* page directory.
*/
- cr4 = read_cr4();
+ cr4 = read_cr4_safe();
if (cr4 & X86_CR4_PAE) {
efi_bak_pg_dir_pointer[0].pgd =
@@ -91,7 +91,7 @@ void efi_call_phys_epilog(void)
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
- cr4 = read_cr4();
+ cr4 = read_cr4_safe();
if (cr4 & X86_CR4_PAE) {
swapper_pg_dir[pgd_index(0)].pgd =
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 2cfcbded888a..2d7e307c7779 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -222,7 +222,7 @@ static __init void map_low_mmrs(void)
enum map_type {map_wb, map_uc};
-static void map_high(char *id, unsigned long base, int shift, enum map_type map_type)
+static __init void map_high(char *id, unsigned long base, int shift, enum map_type map_type)
{
unsigned long bytes, paddr;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1b318e903bf6..9bfc4d72fb2e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -88,6 +88,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
(__START_KERNEL & PGDIR_MASK)));
+ BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
/* clear bss before set_intr_gate with early_idt_handler */
clear_bss();
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ad2b15a1334d..59fd3b6b1303 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -359,6 +359,7 @@ static int hpet_clocksource_register(void)
int __init hpet_enable(void)
{
unsigned long id;
+ int i;
if (!is_hpet_capable())
return 0;
@@ -369,6 +370,29 @@ int __init hpet_enable(void)
* Read the period and check for a sane value:
*/
hpet_period = hpet_readl(HPET_PERIOD);
+
+ /*
+ * AMD SB700 based systems with spread spectrum enabled use a
+ * SMM based HPET emulation to provide proper frequency
+ * setting. The SMM code is initialized with the first HPET
+ * register access and takes some time to complete. During
+ * this time the config register reads 0xffffffff. We check
+ * up to 1000 times whether the config register reads a value other
+ * than 0xffffffff to make sure that the HPET is up and running
+ * before we go further. A counting loop is safe, as the HPET
+ * access takes thousands of CPU cycles. On non SB700 based
+ * machines this check is only done once and has no side
+ * effects.
+ */
+ for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
+ if (i == 1000) {
+ printk(KERN_WARNING
+ "HPET config register value = 0xFFFFFFFF. "
+ "Disabling HPET\n");
+ goto out_nohpet;
+ }
+ }
+
if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
goto out_nohpet;
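
The hpet_enable() hunk above bounds the wait for the SB700's SMM-based HPET emulation: keep reading the config register until it stops returning 0xffffffff, and give up after 1000 reads. A sketch of that bounded-poll shape, with a stubbed register read standing in for hpet_readl(HPET_CFG):

    #include <stdint.h>
    #include <stdio.h>

    /* Stub standing in for hpet_readl(HPET_CFG): pretend the SMM emulation
     * needs a few accesses before the register stops reading 0xffffffff. */
    static uint32_t fake_hpet_cfg_read(void)
    {
            static int accesses;

            return ++accesses < 4 ? 0xffffffffu : 0x00000003u;
    }

    int main(void)
    {
            int i;

            /* Same bounded-poll shape as the loop added to hpet_enable(). */
            for (i = 0; fake_hpet_cfg_read() == 0xffffffffu; i++) {
                    if (i == 1000) {
                            printf("HPET config register stuck at 0xffffffff, disabling HPET\n");
                            return 1;
                    }
            }
            printf("HPET came up after %d extra read(s)\n", i);
            return 0;
    }
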
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index de9aa0e3a9c5..09cddb57bec4 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -57,7 +57,7 @@ atomic_t irq_mis_count;
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
int timer_through_8259 __initdata;
@@ -1209,10 +1209,6 @@ static int assign_irq_vector(int irq)
return vector;
}
-void setup_vector_irq(int cpu)
-{
-}
-
static struct irq_chip ioapic_chip;
#define IOAPIC_AUTO -1
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 8269434d1707..61a83b70c18f 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -101,7 +101,7 @@ int timer_through_8259 __initdata;
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
-DEFINE_SPINLOCK(vector_lock);
+static DEFINE_SPINLOCK(vector_lock);
/*
* # of IRQ routing registers
@@ -697,6 +697,19 @@ static int pin_2_irq(int idx, int apic, int pin)
return irq;
}
+void lock_vector_lock(void)
+{
+ /* Used so that the online set of cpus does not change
+ * during assign_irq_vector.
+ */
+ spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+ spin_unlock(&vector_lock);
+}
+
static int __assign_irq_vector(int irq, cpumask_t mask)
{
/*
@@ -802,7 +815,7 @@ static void __clear_irq_vector(int irq)
cpus_clear(cfg->domain);
}
-static void __setup_vector_irq(int cpu)
+void __setup_vector_irq(int cpu)
{
/* Initialize vector_irq on a new cpu */
/* This function must be called with vector_lock held */
@@ -825,14 +838,6 @@ static void __setup_vector_irq(int cpu)
}
}
-void setup_vector_irq(int cpu)
-{
- spin_lock(&vector_lock);
- __setup_vector_irq(smp_processor_id());
- spin_unlock(&vector_lock);
-}
-
-
static struct irq_chip ioapic_chip;
static void ioapic_register_intr(int irq, unsigned long trigger)
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 9fe478d98406..0732adba05ca 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
+#include <linux/suspend.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -78,7 +79,7 @@ static void load_segments(void)
/*
* A architecture hook called to validate the
* proposed image and prepare the control pages
- * as needed. The pages for KEXEC_CONTROL_CODE_SIZE
+ * as needed. The pages for KEXEC_CONTROL_PAGE_SIZE
* have been allocated, but the segments have yet
* been copied into the kernel.
*
@@ -113,6 +114,7 @@ void machine_kexec(struct kimage *image)
{
unsigned long page_list[PAGES_NR];
void *control_page;
+ int save_ftrace_enabled;
asmlinkage unsigned long
(*relocate_kernel_ptr)(unsigned long indirection_page,
unsigned long control_page,
@@ -120,7 +122,12 @@ void machine_kexec(struct kimage *image)
unsigned int has_pae,
unsigned int preserve_context);
- tracer_disable();
+#ifdef CONFIG_KEXEC_JUMP
+ if (kexec_image->preserve_context)
+ save_processor_state();
+#endif
+
+ save_ftrace_enabled = __ftrace_enabled_save();
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
@@ -138,7 +145,7 @@ void machine_kexec(struct kimage *image)
}
control_page = page_address(image->control_code_page);
- memcpy(control_page, relocate_kernel, PAGE_SIZE/2);
+ memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
relocate_kernel_ptr = control_page;
page_list[PA_CONTROL_PAGE] = __pa(control_page);
@@ -178,6 +185,13 @@ void machine_kexec(struct kimage *image)
(unsigned long)page_list,
image->start, cpu_has_pae,
image->preserve_context);
+
+#ifdef CONFIG_KEXEC_JUMP
+ if (kexec_image->preserve_context)
+ restore_processor_state();
+#endif
+
+ __ftrace_enabled_restore(save_ftrace_enabled);
}
void arch_crash_save_vmcoreinfo(void)
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 07c0f828f488..3b599518c322 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -33,6 +33,8 @@
#include <linux/module.h>
#include <asm/geode.h>
+#define MFGPT_DEFAULT_IRQ 7
+
static struct mfgpt_timer_t {
unsigned int avail:1;
} mfgpt_timers[MFGPT_MAX_TIMERS];
@@ -157,29 +159,48 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
}
EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
-int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable)
+int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable)
{
- u32 val, dummy;
- int offset;
+ u32 zsel, lpc, dummy;
+ int shift;
if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
return -EIO;
- if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
+ /*
+ * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
+ * is using the same CMP of the timer's Siamese twin, the IRQ is set to
+ * 2, and we mustn't use or change it.
+ * XXX: Likewise, two Linux drivers might clash if the 2nd overwrites the
+ * IRQ of the 1st. This can only happen when an IRQ is forced; calling
+ * this with *irq==0 is safe. Currently there are no two such drivers.
+ */
+ rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
+ shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
+ if (((zsel >> shift) & 0xF) == 2)
return -EIO;
- rdmsr(MSR_PIC_ZSEL_LOW, val, dummy);
+ /* Choose IRQ: if none supplied, keep IRQ already set or use default */
+ if (!*irq)
+ *irq = (zsel >> shift) & 0xF;
+ if (!*irq)
+ *irq = MFGPT_DEFAULT_IRQ;
- offset = (timer % 4) * 4;
-
- val &= ~((0xF << offset) | (0xF << (offset + 16)));
+ /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
+ if (*irq < 1 || *irq == 2 || *irq > 15)
+ return -EIO;
+ rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
+ if (lpc & (1 << *irq))
+ return -EIO;
+ /* All chosen and checked - go for it */
+ if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
+ return -EIO;
if (enable) {
- val |= (irq & 0x0F) << (offset);
- val |= (irq & 0x0F) << (offset + 16);
+ zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
+ wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
}
- wrmsr(MSR_PIC_ZSEL_LOW, val, dummy);
return 0;
}
@@ -242,7 +263,7 @@ EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
static u16 mfgpt_event_clock;
-static int irq = 7;
+static int irq;
static int __init mfgpt_setup(char *str)
{
get_option(&str, &irq);
@@ -346,7 +367,7 @@ int __init mfgpt_timer_setup(void)
mfgpt_event_clock = timer;
/* Set up the IRQ on the MFGPT side */
- if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) {
+ if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) {
printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
return -EIO;
}
@@ -374,13 +395,14 @@ int __init mfgpt_timer_setup(void)
&mfgpt_clockevent);
printk(KERN_INFO
- "mfgpt-timer: registering the MFGPT timer as a clock event.\n");
+ "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n",
+ timer, irq);
clockevents_register_device(&mfgpt_clockevent);
return 0;
err:
- geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, irq);
+ geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq);
printk(KERN_ERR
"mfgpt-timer: Unable to set up the MFGPT clock source\n");
return -EIO;
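
geode_mfgpt_set_irq() above locates a timer's 4-bit IRQ routing field inside MSR_PIC_ZSEL_LOW: CMP2 fields sit in the upper half of the register, four bits per timer. A userspace sketch of the shift computation and the read-modify-write of that nibble; the MSR access itself is omitted, and the CMP constants and starting zsel value are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define MFGPT_CMP1 0            /* assumed values, for illustration only */
    #define MFGPT_CMP2 1

    /* Nibble position of a timer/comparator pair inside MSR_PIC_ZSEL_LOW,
     * as computed in geode_mfgpt_set_irq(): CMP2 routing fields live in the
     * upper half of the register, four bits per timer. */
    static int zsel_shift(int timer, int cmp)
    {
            return ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
    }

    int main(void)
    {
            uint32_t zsel = 0;      /* invented register contents */
            int timer = 2, cmp = MFGPT_CMP2, irq = 7;
            int shift = zsel_shift(timer, cmp);

            printf("current IRQ for timer %d, cmp %d: %u\n",
                   timer, cmp, (unsigned)((zsel >> shift) & 0xF));

            /* Route the chosen IRQ while leaving the other nibbles untouched. */
            zsel = (zsel & ~(0xFu << shift)) | ((uint32_t)irq << shift);
            printf("new zsel = %08x\n", (unsigned)zsel);
            return 0;
    }
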
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index fdfdc550b366..efc2f361fe85 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata mmconf_dmi_table[] = {
{}
};
-void __init check_enable_amd_mmconf_dmi(void)
+void __cpuinit check_enable_amd_mmconf_dmi(void)
{
dmi_check_system(mmconf_dmi_table);
}
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 6ae005ccaed8..b3fb430725cb 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -49,7 +49,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
return sum & 0xFF;
}
-static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
+static void __init MP_processor_info(struct mpc_config_processor *m)
{
int apicid;
char *bootup_cpu = "";
@@ -83,7 +83,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
if (x86_quirks->mpc_oem_bus_info)
x86_quirks->mpc_oem_bus_info(m, str);
else
- printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str);
+ apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str);
#if MAX_MP_BUSSES < 256
if (m->mpc_busid >= MAX_MP_BUSSES) {
@@ -154,7 +154,7 @@ static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
{
- printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x,"
+ apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
" IRQ %02x, APIC ID %x, APIC INT %02x\n",
m->mpc_irqtype, m->mpc_irqflag & 3,
(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
@@ -163,7 +163,7 @@ static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
{
- printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x,"
+ apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
" IRQ %02x, APIC ID %x, APIC INT %02x\n",
mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
(mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
@@ -235,7 +235,7 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
{
- printk(KERN_INFO "Lint: type %d, pol %d, trig %d, bus %02x,"
+ apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
m->mpc_irqtype, m->mpc_irqflag & 3,
(m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
@@ -484,7 +484,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
}
-static void construct_ioapic_table(int mpc_default_type)
+static void __init construct_ioapic_table(int mpc_default_type)
{
struct mpc_config_ioapic ioapic;
struct mpc_config_bus bus;
@@ -529,7 +529,7 @@ static void construct_ioapic_table(int mpc_default_type)
construct_default_ioirq_mptable(mpc_default_type);
}
#else
-static inline void construct_ioapic_table(int mpc_default_type) { }
+static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
@@ -695,7 +695,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
unsigned int *bp = phys_to_virt(base);
struct intel_mp_floating *mpf;
- printk(KERN_DEBUG "Scan SMP from %p for %ld bytes.\n", bp, length);
+ apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
+ bp, length);
BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 9fd809552447..e43938086885 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -131,7 +131,7 @@ static int msr_open(struct inode *inode, struct file *file)
ret = -EIO; /* MSR not supported */
out:
unlock_kernel();
- return 0;
+ return ret;
}
/*
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index ac6d51222e7d..abb78a2cc4ad 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -114,6 +114,23 @@ static __init void nmi_cpu_busy(void *data)
}
#endif
+static void report_broken_nmi(int cpu, int *prev_nmi_count)
+{
+ printk(KERN_CONT "\n");
+
+ printk(KERN_WARNING
+ "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+ cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
+
+ printk(KERN_WARNING
+ "Please report this to bugzilla.kernel.org,\n");
+ printk(KERN_WARNING
+ "and attach the output of the 'dmesg' command.\n");
+
+ per_cpu(wd_enabled, cpu) = 0;
+ atomic_dec(&nmi_active);
+}
+
int __init check_nmi_watchdog(void)
{
unsigned int *prev_nmi_count;
@@ -141,15 +158,8 @@ int __init check_nmi_watchdog(void)
for_each_online_cpu(cpu) {
if (!per_cpu(wd_enabled, cpu))
continue;
- if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
- printk(KERN_WARNING "WARNING: CPU#%d: NMI "
- "appears to be stuck (%d->%d)!\n",
- cpu,
- prev_nmi_count[cpu],
- get_nmi_count(cpu));
- per_cpu(wd_enabled, cpu) = 0;
- atomic_dec(&nmi_active);
- }
+ if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
+ report_broken_nmi(cpu, prev_nmi_count);
}
endflag = 1;
if (!atomic_read(&nmi_active)) {
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index b8c45610b20a..eecc8c18f010 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -73,7 +73,7 @@ static void __init smp_dump_qct(void)
}
-void __init numaq_tsc_disable(void)
+void __cpuinit numaq_tsc_disable(void)
{
if (!found_numaq)
return;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 94da4d52d798..300da17e61cb 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -471,7 +471,7 @@ struct pv_lock_ops pv_lock_ops = {
.spin_unlock = __ticket_spin_unlock,
#endif
};
-EXPORT_SYMBOL_GPL(pv_lock_ops);
+EXPORT_SYMBOL(pv_lock_ops);
EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL (pv_cpu_ops);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index b67a4b1d4eae..dcdac6c826e9 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -343,9 +343,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
/* were we called with bad_dma_address? */
badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
- printk(KERN_ERR "Calgary: driver tried unmapping bad DMA "
+ WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
- WARN_ON(1);
return;
}
@@ -1269,13 +1268,15 @@ static inline int __init determine_tce_table_size(u64 ram)
static int __init build_detail_arrays(void)
{
unsigned long ptr;
- int i, scal_detail_size, rio_detail_size;
+ unsigned numnodes, i;
+ int scal_detail_size, rio_detail_size;
- if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){
+ numnodes = rio_table_hdr->num_scal_dev;
+ if (numnodes > MAX_NUMNODES){
printk(KERN_WARNING
"Calgary: MAX_NUMNODES too low! Defined as %d, "
"but system has %d nodes.\n",
- MAX_NUMNODES, rio_table_hdr->num_scal_dev);
+ MAX_NUMNODES, numnodes);
return -ENODEV;
}
@@ -1296,8 +1297,7 @@ static int __init build_detail_arrays(void)
}
ptr = ((unsigned long)rio_table_hdr) + 3;
- for (i = 0; i < rio_table_hdr->num_scal_dev;
- i++, ptr += scal_detail_size)
+ for (i = 0; i < numnodes; i++, ptr += scal_detail_size)
scal_devs[i] = (struct scal_detail *)ptr;
for (i = 0; i < rio_table_hdr->num_rio_dev;
@@ -1350,7 +1350,7 @@ static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
* Function for kdump case. Get the tce tables from first kernel
* by reading the contents of the base address register of calgary iommu
*/
-static void get_tce_space_from_tar()
+static void __init get_tce_space_from_tar(void)
{
int bus;
void __iomem *target;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 53bc653ed5ca..3b7a1ddcc0bc 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -95,7 +95,6 @@ static inline void play_dead(void)
{
/* This must be done before dead CPU ack */
cpu_exit_clear();
- wbinvd();
mb();
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
@@ -104,8 +103,8 @@ static inline void play_dead(void)
* With physical CPU hotplug, we should halt the cpu
*/
local_irq_disable();
- while (1)
- halt();
+ /* mask all interrupts, flush any and all caches, and halt */
+ wbinvd_halt();
}
#else
static inline void play_dead(void)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3fb62a7d9a16..71553b664e2a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -93,14 +93,13 @@ DECLARE_PER_CPU(int, cpu_state);
static inline void play_dead(void)
{
idle_task_exit();
- wbinvd();
mb();
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
local_irq_disable();
- while (1)
- halt();
+ /* mask all interrupts, flush any and all caches, and halt */
+ wbinvd_halt();
}
#else
static inline void play_dead(void)
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 703310a99023..6f50664b2ba5 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -20,10 +20,11 @@
#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define PAE_PGD_ATTR (_PAGE_PRESENT)
-/* control_page + PAGE_SIZE/2 ~ control_page + PAGE_SIZE * 3/4 are
- * used to save some data for jumping back
+/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE
+ * ~ control_page + PAGE_SIZE are used as data storage and stack for
+ * jumping back
*/
-#define DATA(offset) (PAGE_SIZE/2+(offset))
+#define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
/* Minimal CPU state */
#define ESP DATA(0x0)
@@ -376,3 +377,6 @@ swap_pages:
popl %ebx
popl %ebp
ret
+
+ .globl kexec_control_code_size
+.set kexec_control_code_size, . - relocate_kernel
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2d888586385d..a4656adab53b 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -445,7 +445,7 @@ static void __init reserve_early_setup_data(void)
* @size: Size of the crashkernel memory to reserve.
* Returns the base address on success, and -1ULL on failure.
*/
-unsigned long long find_and_reserve_crashkernel(unsigned long long size)
+unsigned long long __init find_and_reserve_crashkernel(unsigned long long size)
{
const unsigned long long alignment = 16<<20; /* 16M */
unsigned long long start = 0LL;
@@ -604,6 +604,14 @@ void __init setup_arch(char **cmdline_p)
early_cpu_init();
early_ioremap_init();
+#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
+ /*
+ * Must be before kernel pagetables are setup
+ * or fixmap area is touched.
+ */
+ vmi_init();
+#endif
+
ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
screen_info = boot_params.screen_info;
edid_info = boot_params.edid_info;
@@ -817,14 +825,6 @@ void __init setup_arch(char **cmdline_p)
kvmclock_init();
#endif
-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
- /*
- * Must be after max_low_pfn is determined, and before kernel
- * pagetables are setup.
- */
- vmi_init();
-#endif
-
paravirt_pagetable_setup_start(swapper_pg_dir);
paging_init();
paravirt_pagetable_setup_done(swapper_pg_dir);
@@ -861,12 +861,6 @@ void __init setup_arch(char **cmdline_p)
init_apic_mappings();
ioapic_init_mappings();
-#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
- if (def_to_bigsmp)
- printk(KERN_WARNING "More than 8 CPUs detected and "
- "CONFIG_X86_PC cannot handle it.\nUse "
- "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
-#endif
kvm_guest_init();
e820_reserve_resources();
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index b45ef8ddd651..ca316b5b742c 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -104,7 +104,16 @@ static inline int restore_i387(struct _fpstate __user *buf)
clts();
task_thread_info(current)->status |= TS_USEDFPU;
}
- return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
+ err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
+ if (unlikely(err)) {
+ /*
+ * Encountered an error while doing the restore from the
+ * user buffer, clear the fpu state.
+ */
+ clear_fpu(tsk);
+ clear_used_math();
+ }
+ return err;
}
/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 332512767f4f..e139e617f422 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -326,12 +326,16 @@ static void __cpuinit start_secondary(void *unused)
* for which cpus receive the IPI. Holding this
* lock helps us to not include this cpu in a currently in progress
* smp_call_function().
+ *
+ * We need to hold vector_lock so that the set of online cpus
+ * does not change while we are assigning vectors to cpus. Holding
+ * this lock ensures we don't half assign or remove an irq from a cpu.
*/
ipi_call_lock_irq();
-#ifdef CONFIG_X86_IO_APIC
- setup_vector_irq(smp_processor_id());
-#endif
+ lock_vector_lock();
+ __setup_vector_irq(smp_processor_id());
cpu_set(smp_processor_id(), cpu_online_map);
+ unlock_vector_lock();
ipi_call_unlock_irq();
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -752,6 +756,14 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
}
#ifdef CONFIG_X86_64
+
+/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
+static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
+{
+ if (!after_bootmem)
+ free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
+}
+
/*
* Allocate node local memory for the AP pda.
*
@@ -780,8 +792,7 @@ int __cpuinit get_local_pda(int cpu)
if (oldpda) {
memcpy(newpda, oldpda, size);
- if (!after_bootmem)
- free_bootmem((unsigned long)oldpda, size);
+ free_bootmem_pda(oldpda);
}
newpda->in_bootmem = 0;
@@ -1044,6 +1055,34 @@ static __init void disable_smp(void)
static int __init smp_sanity_check(unsigned max_cpus)
{
preempt_disable();
+
+#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
+ if (def_to_bigsmp && nr_cpu_ids > 8) {
+ unsigned int cpu;
+ unsigned nr;
+
+ printk(KERN_WARNING
+ "More than 8 CPUs detected - skipping them.\n"
+ "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+
+ nr = 0;
+ for_each_present_cpu(cpu) {
+ if (nr >= 8)
+ cpu_clear(cpu, cpu_present_map);
+ nr++;
+ }
+
+ nr = 0;
+ for_each_possible_cpu(cpu) {
+ if (nr >= 8)
+ cpu_clear(cpu, cpu_possible_map);
+ nr++;
+ }
+
+ nr_cpu_ids = 8;
+ }
+#endif
+
if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
printk(KERN_WARNING "weird, boot CPU (#%d) not listed"
"by the BIOS.\n", hard_smp_processor_id());
@@ -1336,7 +1375,9 @@ int __cpu_disable(void)
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
+ lock_vector_lock();
remove_cpu_from_maps(cpu);
+ unlock_vector_lock();
fixup_irqs(cpu_online_map);
return 0;
}
@@ -1370,17 +1411,3 @@ void __cpu_die(unsigned int cpu)
BUG();
}
#endif
-
-/*
- * If the BIOS enumerates physical processors before logical,
- * maxcpus=N at enumeration-time can be used to disable HT.
- */
-static int __init parse_maxcpus(char *arg)
-{
- extern unsigned int maxcpus;
-
- if (arg)
- maxcpus = simple_strtoul(arg, NULL, 0);
- return 0;
-}
-early_param("maxcpus", parse_maxcpus);
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 99941b37eca0..397e309839dd 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -8,18 +8,21 @@
DEFINE_PER_CPU(unsigned long, this_cpu_off);
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-/* Initialize the CPU's GDT. This is either the boot CPU doing itself
- (still using the master per-cpu area), or a CPU doing it for a
- secondary which will soon come up. */
+/*
+ * Initialize the CPU's GDT. This is either the boot CPU doing itself
+ * (still using the master per-cpu area), or a CPU doing it for a
+ * secondary which will soon come up.
+ */
__cpuinit void init_gdt(int cpu)
{
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ struct desc_struct gdt;
- pack_descriptor(&gdt[GDT_ENTRY_PERCPU],
- __per_cpu_offset[cpu], 0xFFFFF,
+ pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
0x2 | DESCTYPE_S, 0x8);
+ gdt.s = 1;
- gdt[GDT_ENTRY_PERCPU].s = 1;
+ write_gdt_entry(get_cpu_gdt_table(cpu),
+ GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
per_cpu(cpu_number, cpu) = cpu;
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index d0fbb7712ab0..8b8c0d6640fa 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -17,6 +17,7 @@
#include <asm/genapic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
+#include <asm/irq_vectors.h>
#include <mach_apic.h>
@@ -783,7 +784,7 @@ static int __init uv_bau_init(void)
uv_init_blade(blade, node, cur_cpu);
cur_cpu += uv_blade_nr_possible_cpus(blade);
}
- set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
+ alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
uv_enable_timeouts();
return 0;
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 3f18d73f420c..513caaca7115 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -1131,7 +1131,14 @@ asmlinkage void math_state_restore(void)
}
clts(); /* Allow maths ops (or we recurse) */
- restore_fpu_checking(&me->thread.xstate->fxsave);
+ /*
+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+ */
+ if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
+ stts();
+ force_sig(SIGSEGV, me);
+ return;
+ }
task_thread_info(me)->status |= TS_USEDFPU;
me->fpu_counter++;
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7603c0553909..46af71676738 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup);
/*
* Read TSC and the reference counters. Take care of SMI disturbance
*/
-static u64 __init tsc_read_refs(u64 *pm, u64 *hpet)
+static u64 tsc_read_refs(u64 *pm, u64 *hpet)
{
u64 t1, t2;
int i;
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 0577825cf89b..9ffb01c31c40 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -88,11 +88,9 @@ static __cpuinit void check_tsc_warp(void)
__raw_spin_unlock(&sync_lock);
}
}
- if (!(now-start)) {
- printk("Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
+ WARN(!(now-start),
+ "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
now-start, end-start);
- WARN_ON(1);
- }
}
/*
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 41e01b145c48..594ef47f0a63 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -184,8 +184,6 @@ static int __init visws_get_smp_config(unsigned int early)
return 1;
}
-extern unsigned int __cpuinitdata maxcpus;
-
/*
* The Visual Workstation is Intel MP compliant in the hardware
* sense, but it doesn't have a BIOS(-configuration table).
@@ -244,8 +242,8 @@ static int __init visws_find_smp_config(unsigned int reserve)
ncpus = CO_CPU_MAX;
}
- if (ncpus > maxcpus)
- ncpus = maxcpus;
+ if (ncpus > setup_max_cpus)
+ ncpus = setup_max_cpus;
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 1;
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 0a1b1a9d922d..6ca515d6db54 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -37,6 +37,7 @@
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
+#include <asm/setup.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -683,7 +684,7 @@ void vmi_bringup(void)
{
/* We must establish the lowmem mapping for MMU ops to work */
if (vmi_ops.set_linear_mapping)
- vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
+ vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
}
/*
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index cdb2363697d2..af5bdad84604 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -209,3 +209,11 @@ SECTIONS
DWARF_DEBUG
}
+
+#ifdef CONFIG_KEXEC
+/* Link time checks */
+#include <asm/kexec.h>
+
+ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
+ "kexec control code size is too big")
+#endif
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 2977ea37791f..dfb932dcf136 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,7 +1,6 @@
obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
- pat.o pgtable.o
+ pat.o pgtable.o gup.o
-obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o
obj-$(CONFIG_X86_32) += pgtable_32.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 129618ca0ea2..a87ea0e4b3dc 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -60,7 +60,7 @@ static unsigned long dma_reserve __initdata;
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-int direct_gbpages __meminitdata
+int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
= 1
#endif
@@ -88,7 +88,11 @@ early_param("gbpages", parse_direct_gbpages_on);
int after_bootmem;
-static __init void *spp_getpage(void)
+/*
+ * NOTE: This function is marked __ref because it calls the __init function
+ * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
+ */
+static __ref void *spp_getpage(void)
{
void *ptr;
@@ -314,6 +318,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
{
unsigned long pages = 0;
unsigned long last_map_addr = end;
+ unsigned long start = address;
int i = pmd_index(address);
@@ -334,6 +339,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
if (!pmd_large(*pmd))
last_map_addr = phys_pte_update(pmd, address,
end);
+ /* Count entries we're using from level2_ident_pgt */
+ if (start == 0)
+ pages++;
continue;
}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 016f335bbeea..d4b6e6a29ae3 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -170,7 +170,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
- retval = reserve_memtype(phys_addr, phys_addr + size,
+ retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
prot_val, &new_prot_val);
if (retval) {
pr_debug("Warning: reserve_memtype returned %d\n", retval);
@@ -553,13 +553,11 @@ static int __init check_early_ioremap_leak(void)
{
if (!early_ioremap_nested)
return 0;
-
- printk(KERN_WARNING
+ WARN(1, KERN_WARNING
"Debug warning: early ioremap leak of %d areas detected.\n",
- early_ioremap_nested);
+ early_ioremap_nested);
printk(KERN_WARNING
- "please boot with early_ioremap_debug and report the dmesg.\n");
- WARN_ON(1);
+ "please boot with early_ioremap_debug and report the dmesg.\n");
return 1;
}
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index e7397e108beb..635b50e85581 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -430,7 +430,9 @@ static void enter_uniprocessor(void)
"may miss events.\n");
}
-static void leave_uniprocessor(void)
+/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
+ but this whole function is ifdefed on CONFIG_HOTPLUG_CPU */
+static void __ref leave_uniprocessor(void)
{
int cpu;
int err;
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 6ae1f28a7ff2..7c3017287119 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -224,8 +224,7 @@ static int pageattr_test(void)
failed += print_split(&sc);
if (failed) {
- printk(KERN_ERR "NOT PASSED. Please report.\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "NOT PASSED. Please report.\n");
return -EINVAL;
} else {
if (print)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 041e81ef673a..1785591808bd 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -59,13 +59,19 @@ static void split_page_count(int level)
int arch_report_meminfo(char *page)
{
- int n = sprintf(page, "DirectMap4k: %8lu\n"
- "DirectMap2M: %8lu\n",
- direct_pages_count[PG_LEVEL_4K],
- direct_pages_count[PG_LEVEL_2M]);
+ int n = sprintf(page, "DirectMap4k: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_4K] << 2);
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ n += sprintf(page + n, "DirectMap2M: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_2M] << 11);
+#else
+ n += sprintf(page + n, "DirectMap4M: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_2M] << 12);
+#endif
#ifdef CONFIG_X86_64
- n += sprintf(page + n, "DirectMap1G: %8lu\n",
- direct_pages_count[PG_LEVEL_1G]);
+ if (direct_gbpages)
+ n += sprintf(page + n, "DirectMap1G: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_1G] << 20);
#endif
return n;
}
@@ -636,9 +642,8 @@ repeat:
if (!pte_val(old_pte)) {
if (!primary)
return 0;
- printk(KERN_WARNING "CPA: called for zero pte. "
+ WARN(1, KERN_WARNING "CPA: called for zero pte. "
"vaddr = %lx cpa->vaddr = %lx\n", address,
- WARN_ON(1);
*cpa->vaddr);
return -EINVAL;
}
@@ -927,7 +932,7 @@ int set_memory_uc(unsigned long addr, int numpages)
/*
* for now UC MINUS. see comments in ioremap_nocache()
*/
- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+ if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_UC_MINUS, NULL))
return -EINVAL;
@@ -967,7 +972,7 @@ int set_memory_wc(unsigned long addr, int numpages)
if (!pat_enabled)
return set_memory_uc(addr, numpages);
- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+ if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_WC, NULL))
return -EINVAL;
@@ -983,7 +988,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
int set_memory_wb(unsigned long addr, int numpages)
{
- free_memtype(addr, addr + numpages * PAGE_SIZE);
+ free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
return _set_memory_wb(addr, numpages);
}
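
The arch_report_meminfo() hunk above turns raw page counts into kB with shifts: 4k pages are <<2, 2M pages <<11, 4M pages <<12 and 1G pages <<20. A tiny check of that arithmetic with made-up counts:

    #include <stdio.h>

    int main(void)
    {
            /* Made-up page counts; only the shift arithmetic matters here. */
            unsigned long pages_4k = 12345, pages_2m = 678, pages_1g = 3;

            printf("DirectMap4k: %8lu kB\n", pages_4k << 2);   /* 4 kB per page */
            printf("DirectMap2M: %8lu kB\n", pages_2m << 11);  /* 2048 kB per page */
            printf("DirectMap1G: %8lu kB\n", pages_1g << 20);  /* 1048576 kB per page */
            return 0;
    }
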
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 647b1c4de719..f049b1d6ebdf 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
return -EBUSY;
}
+static struct memtype *cached_entry;
+static u64 cached_start;
+
/*
* req_type typically has one of the:
* - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
spin_lock(&memtype_lock);
+ if (cached_entry && start >= cached_start)
+ entry = cached_entry;
+ else
+ entry = list_entry(&memtype_list, struct memtype, nd);
+
/* Search for existing mapping that overlaps the current range */
where = NULL;
- list_for_each_entry(entry, &memtype_list, nd) {
+ list_for_each_entry_continue(entry, &memtype_list, nd) {
if (end <= entry->start) {
where = entry->nd.prev;
+ cached_entry = list_entry(where, struct memtype, nd);
break;
} else if (start <= entry->start) { /* end > entry->start */
err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
dprintk("Overlap at 0x%Lx-0x%Lx\n",
entry->start, entry->end);
where = entry->nd.prev;
+ cached_entry = list_entry(where,
+ struct memtype, nd);
}
break;
} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
if (!err) {
dprintk("Overlap at 0x%Lx-0x%Lx\n",
entry->start, entry->end);
- where = &entry->nd;
+ cached_entry = list_entry(entry->nd.prev,
+ struct memtype, nd);
+
+ /*
+ * Move to the right position in the linked
+ * list to add this new entry
+ */
+ list_for_each_entry_continue(entry,
+ &memtype_list, nd) {
+ if (start <= entry->start) {
+ where = entry->nd.prev;
+ break;
+ }
+ }
}
break;
}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
return err;
}
+ cached_start = start;
+
if (where)
list_add(&new->nd, where);
else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
spin_lock(&memtype_lock);
list_for_each_entry(entry, &memtype_list, nd) {
if (entry->start == start && entry->end == end) {
+ if (cached_entry == entry || cached_start == start)
+ cached_entry = NULL;
+
list_del(&entry->nd);
kfree(entry);
err = 0;
@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end)
}
-/*
- * /dev/mem mmap interface. The memtype used for mapping varies:
- * - Use UC for mappings with O_SYNC flag
- * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
- * inherit the memtype from existing mapping.
- * - Else use UC_MINUS memtype (for backward compatibility with existing
- * X drivers.
- */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot)
{
u64 offset = ((u64) pfn) << PAGE_SHIFT;
- unsigned long flags = _PAGE_CACHE_UC_MINUS;
+ unsigned long flags = -1;
int retval;
if (!range_is_allowed(pfn, size))
return 0;
if (file->f_flags & O_SYNC) {
- flags = _PAGE_CACHE_UC;
+ flags = _PAGE_CACHE_UC_MINUS;
}
#ifdef CONFIG_X86_32
@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
#endif
/*
- * With O_SYNC, we can only take UC mapping. Fail if we cannot.
+ * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
+ *
* Without O_SYNC, we want to get
* - WB for WB-able memory and no other conflicting mappings
* - UC_MINUS for non-WB-able memory with no other conflicting mappings
* - Inherit from conflicting mappings otherwise
*/
- if (flags != _PAGE_CACHE_UC_MINUS) {
+ if (flags != -1) {
retval = reserve_memtype(offset, offset + size, flags, NULL);
} else {
retval = reserve_memtype(offset, offset + size, -1, &flags);
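
The reserve_memtype() change above caches the last insertion point (cached_entry/cached_start) so a run of ascending reservations does not rescan the sorted memtype list from its head every time; free_memtype() drops the hint when the cached entry goes away. A toy userspace sketch of a sorted-list insert with such a cached hint; it deliberately leaves out the conflict checking and locking the real code does, and the range values are invented:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy version of the memtype list: ranges kept sorted by start address,
     * with a cached hint so a run of ascending reservations does not rescan
     * the whole list (the cached_entry idea from pat.c). */
    struct range {
            unsigned long start, end;
            struct range *next;
    };

    static struct range head;               /* dummy head node */
    static struct range *cached;
    static unsigned long cached_start;

    static void insert_range(unsigned long start, unsigned long end)
    {
            struct range *prev, *new = malloc(sizeof(*new));

            new->start = start;
            new->end = end;

            /* Start scanning from the cached hint when it is usable. */
            prev = (cached && start >= cached_start) ? cached : &head;
            while (prev->next && prev->next->start < start)
                    prev = prev->next;

            new->next = prev->next;
            prev->next = new;
            cached = new;
            cached_start = start;
    }

    int main(void)
    {
            struct range *r;

            insert_range(0x1000, 0x2000);   /* ascending inserts hit the hint */
            insert_range(0x3000, 0x4000);
            insert_range(0x2000, 0x3000);   /* falls back to a scan from head */

            for (r = head.next; r; r = r->next)
                    printf("[%lx-%lx)\n", r->start, r->end);
            return 0;
    }
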
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 557b2abceef8..d50302774fe2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -207,6 +207,9 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
unsigned long addr;
int i;
+ if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
+ return;
+
pud = pud_offset(pgd, 0);
for (addr = i = 0; i < PREALLOCATED_PMDS;
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 1eb2973a301c..16ae70fc57e7 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -178,7 +178,7 @@ void acpi_numa_arch_fixup(void)
* start of the node, and that the current "end" address is after
* the previous one.
*/
-static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
+static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
{
/*
* Only add present memory as told by the e820.
@@ -189,10 +189,10 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
if (memory_chunk->start_pfn >= max_pfn) {
printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n",
memory_chunk->start_pfn, memory_chunk->end_pfn);
- return;
+ return -1;
}
if (memory_chunk->nid != nid)
- return;
+ return -1;
if (!node_has_online_mem(nid))
node_start_pfn[nid] = memory_chunk->start_pfn;
@@ -202,6 +202,8 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
if (node_end_pfn[nid] < memory_chunk->end_pfn)
node_end_pfn[nid] = memory_chunk->end_pfn;
+
+ return 0;
}
int __init get_memcfg_from_srat(void)
@@ -259,7 +261,9 @@ int __init get_memcfg_from_srat(void)
printk(KERN_DEBUG
"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
- node_read_chunk(chunk->nid, chunk);
+ if (node_read_chunk(chunk->nid, chunk))
+ continue;
+
e820_register_active_regions(chunk->nid, chunk->start_pfn,
min(chunk->end_pfn, max_pfn));
}
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 3f90289410e6..0227694f7dab 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
+#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>
@@ -28,23 +29,48 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
static int nmi_start(void);
static void nmi_stop(void);
+static void nmi_cpu_start(void *dummy);
+static void nmi_cpu_stop(void *dummy);
/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;
+#ifdef CONFIG_SMP
+static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
+ void *data)
+{
+ int cpu = (unsigned long)data;
+ switch (action) {
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+ break;
+ case CPU_DOWN_PREPARE:
+ smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block oprofile_cpu_nb = {
+ .notifier_call = oprofile_cpu_notifier
+};
+#endif
+
#ifdef CONFIG_PM
static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
+ /* Only one CPU left, just stop that one */
if (nmi_enabled == 1)
- nmi_stop();
+ nmi_cpu_stop(NULL);
return 0;
}
static int nmi_resume(struct sys_device *dev)
{
if (nmi_enabled == 1)
- nmi_start();
+ nmi_cpu_start(NULL);
return 0;
}
@@ -463,6 +489,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
}
init_sysfs();
+#ifdef CONFIG_SMP
+ register_cpu_notifier(&oprofile_cpu_nb);
+#endif
using_nmi = 1;
ops->create_files = nmi_create_files;
ops->setup = nmi_setup;
@@ -476,6 +505,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
void op_nmi_exit(void)
{
- if (using_nmi)
+ if (using_nmi) {
exit_sysfs();
+#ifdef CONFIG_SMP
+ unregister_cpu_notifier(&oprofile_cpu_nb);
+#endif
+ }
}
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 23faaa890ffc..2bd5c53f6386 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -365,7 +365,7 @@ static void __init pci_mmcfg_reject_broken(int early)
return;
reject:
- printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
+ printk(KERN_INFO "PCI: Not using MMCONFIG.\n");
pci_mmcfg_arch_free();
kfree(pci_mmcfg_config);
pci_mmcfg_config = NULL;
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
index 7dc5d5cf50a2..d3e083dea720 100644
--- a/arch/x86/power/cpu_32.c
+++ b/arch/x86/power/cpu_32.c
@@ -45,7 +45,7 @@ static void __save_processor_state(struct saved_context *ctxt)
ctxt->cr0 = read_cr0();
ctxt->cr2 = read_cr2();
ctxt->cr3 = read_cr3();
- ctxt->cr4 = read_cr4();
+ ctxt->cr4 = read_cr4_safe();
}
/* Needed by apm.c */
@@ -98,7 +98,9 @@ static void __restore_processor_state(struct saved_context *ctxt)
/*
* control registers
*/
- write_cr4(ctxt->cr4);
+ /* cr4 was introduced in the Pentium CPU */
+ if (ctxt->cr4)
+ write_cr4(ctxt->cr4);
write_cr3(ctxt->cr3);
write_cr2(ctxt->cr2);
write_cr0(ctxt->cr0);
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index b95aa6cfe3cb..4fc7e872c85e 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -28,9 +28,9 @@ ENTRY(swsusp_arch_suspend)
ret
ENTRY(restore_image)
- movl resume_pg_dir, %ecx
- subl $__PAGE_OFFSET, %ecx
- movl %ecx, %cr3
+ movl resume_pg_dir, %eax
+ subl $__PAGE_OFFSET, %eax
+ movl %eax, %cr3
movl restore_pblist, %edx
.p2align 4,,7
@@ -52,17 +52,21 @@ copy_loop:
done:
/* go back to the original page tables */
- movl $swapper_pg_dir, %ecx
- subl $__PAGE_OFFSET, %ecx
- movl %ecx, %cr3
+ movl $swapper_pg_dir, %eax
+ subl $__PAGE_OFFSET, %eax
+ movl %eax, %cr3
/* Flush TLB, including "global" things (vmalloc) */
- movl mmu_cr4_features, %eax
- movl %eax, %edx
+ movl mmu_cr4_features, %ecx
+ jecxz 1f # cr4 Pentium and higher, skip if zero
+ movl %ecx, %edx
andl $~(1<<7), %edx; # PGE
movl %edx, %cr4; # turn off PGE
- movl %cr3, %ecx; # flush TLB
- movl %ecx, %cr3
- movl %eax, %cr4; # turn PGE back on
+1:
+ movl %cr3, %eax; # flush TLB
+ movl %eax, %cr3
+ jecxz 1f # cr4 Pentium and higher, skip if zero
+ movl %ecx, %cr4; # turn PGE back on
+1:
movl saved_context_esp, %esp
movl saved_context_ebp, %ebp
diff --git a/crypto/digest.c b/crypto/digest.c
index ac0919460d14..5d3f1303da98 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -225,7 +225,7 @@ int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
struct ahash_tfm *crt = &tfm->crt_ahash;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
- if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
+ if (dalg->dia_digestsize > PAGE_SIZE / 8)
return -EINVAL;
crt->init = digest_async_init;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 59821a22d752..66368022e0bf 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -481,21 +481,31 @@ next_one:
for (k = 0, temp = 0; k < template[i].np; k++) {
printk(KERN_INFO "page %u\n", k);
- q = &axbuf[IDX[k]];
- hexdump(q, template[i].tap[k]);
+ q = &xbuf[IDX[k]];
+
+ n = template[i].tap[k];
+ if (k == template[i].np - 1)
+ n += enc ? authsize : -authsize;
+ hexdump(q, n);
printk(KERN_INFO "%s\n",
- memcmp(q, template[i].result + temp,
- template[i].tap[k] -
- (k < template[i].np - 1 || enc ?
- 0 : authsize)) ?
+ memcmp(q, template[i].result + temp, n) ?
"fail" : "pass");
- for (n = 0; q[template[i].tap[k] + n]; n++)
- ;
+ q += n;
+ if (k == template[i].np - 1 && !enc) {
+ if (memcmp(q, template[i].input +
+ temp + n, authsize))
+ n = authsize;
+ else
+ n = 0;
+ } else {
+ for (n = 0; q[n]; n++)
+ ;
+ }
if (n) {
printk("Result buffer corruption %u "
"bytes:\n", n);
- hexdump(&q[template[i].tap[k]], n);
+ hexdump(q, n);
}
temp += template[i].tap[k];
diff --git a/drivers/Makefile b/drivers/Makefile
index a280ab3d0833..2735bde73475 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_USB) += usb/
+obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/gadget/
obj-$(CONFIG_SERIO) += input/serio/
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bb7c51f712bd..7d2edf143f16 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -563,9 +563,6 @@ EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
*/
static int handle_eject_request(struct dock_station *ds, u32 event)
{
- if (!dock_present(ds))
- return -ENODEV;
-
if (dock_in_progress(ds))
return -EBUSY;
@@ -573,8 +570,16 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
* here we need to generate the undock
* event prior to actually doing the undock
* so that the device struct still exists.
+	 * Also send the dock event even if the
+	 * device is not present anymore
*/
dock_event(ds, event, UNDOCK_EVENT);
+
+ if (!dock_present(ds)) {
+ complete_undock(ds);
+ return -ENODEV;
+ }
+
hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
undock(ds);
eject_dock(ds);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5622aee996b2..13593f9f2197 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -110,6 +110,31 @@ static struct acpi_ec {
u8 handlers_installed;
} *boot_ec, *first_ec;
+/*
+ * Some Asus systems have exchanged ECDT data/command IO addresses.
+ */
+static int print_ecdt_error(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE PREFIX "%s detected - "
+ "ECDT has exchanged control/data I/O address\n",
+ id->ident);
+ return 0;
+}
+
+static struct dmi_system_id __cpuinitdata ec_dmi_table[] = {
+ {
+ print_ecdt_error, "Asus L4R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
+ DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
+ {
+ print_ecdt_error, "Asus M6R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "0207"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
+ DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
+ {},
+};
+
/* --------------------------------------------------------------------------
Transaction Management
-------------------------------------------------------------------------- */
@@ -196,6 +221,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
return 0;
msleep(1);
}
+		if (acpi_ec_check_status(ec, event))
+ return 0;
}
pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
acpi_ec_read_status(ec),
@@ -911,6 +938,15 @@ int __init acpi_ec_ecdt_probe(void)
pr_info(PREFIX "EC description table is found, configuring boot EC\n");
boot_ec->command_addr = ecdt_ptr->control.address;
boot_ec->data_addr = ecdt_ptr->data.address;
+ if (dmi_check_system(ec_dmi_table)) {
+ /*
+ * If the board falls into ec_dmi_table, it means
+ * that ECDT table gives the incorrect command/status
+ * & data I/O address. Just fix it.
+ */
+ boot_ec->data_addr = ecdt_ptr->control.address;
+ boot_ec->command_addr = ecdt_ptr->data.address;
+ }
boot_ec->gpe = ecdt_ptr->gpe;
boot_ec->handle = ACPI_ROOT_OBJECT;
acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 2a32c843cb4a..8892b9824fae 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -479,5 +479,8 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
acpi_tb_set_table_loaded_flag(table_index, FALSE);
+ /* Table unloaded, remove a reference to the ddb_handle object */
+
+ acpi_ut_remove_reference(ddb_handle);
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 549db42f16cf..bd5773878009 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -56,13 +56,14 @@ ACPI_MODULE_NAME("nsnames")
* Size - Size of the pathname
* *name_buffer - Where to return the pathname
*
- * RETURN: Places the pathname into the name_buffer, in external format
+ * RETURN: Status
+ * Places the pathname into the name_buffer, in external format
* (name segments separated by path separators)
*
 * DESCRIPTION: Generate a full pathname
*
******************************************************************************/
-void
+acpi_status
acpi_ns_build_external_path(struct acpi_namespace_node *node,
acpi_size size, char *name_buffer)
{
@@ -77,7 +78,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
if (index < ACPI_NAME_SIZE) {
name_buffer[0] = AML_ROOT_PREFIX;
name_buffer[1] = 0;
- return;
+ return (AE_OK);
}
/* Store terminator byte, then build name backwards */
@@ -105,11 +106,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
if (index != 0) {
ACPI_ERROR((AE_INFO,
- "Could not construct pathname; index=%X, size=%X, Path=%s",
+ "Could not construct external pathname; index=%X, size=%X, Path=%s",
(u32) index, (u32) size, &name_buffer[size]));
+
+ return (AE_BAD_PARAMETER);
}
- return;
+ return (AE_OK);
}
#ifdef ACPI_DEBUG_OUTPUT
@@ -129,6 +132,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
+ acpi_status status;
char *name_buffer;
acpi_size size;
@@ -138,8 +142,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
size = acpi_ns_get_pathname_length(node);
if (!size) {
- ACPI_ERROR((AE_INFO, "Invalid node failure"));
- return_PTR(NULL);
+ return (NULL);
}
/* Allocate a buffer to be returned to caller */
@@ -152,7 +155,11 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
/* Build the path in the allocated buffer */
- acpi_ns_build_external_path(node, size, name_buffer);
+ status = acpi_ns_build_external_path(node, size, name_buffer);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
return_PTR(name_buffer);
}
#endif
@@ -186,7 +193,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
while (next_node && (next_node != acpi_gbl_root_node)) {
if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
ACPI_ERROR((AE_INFO,
- "Invalid NS Node (%p) while traversing path",
+ "Invalid Namespace Node (%p) while traversing namespace",
next_node));
return 0;
}
@@ -234,8 +241,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
required_size = acpi_ns_get_pathname_length(node);
if (!required_size) {
- ACPI_ERROR((AE_INFO, "Invalid node failure"));
- return_ACPI_STATUS(AE_ERROR);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Validate/Allocate/Clear caller buffer */
@@ -247,7 +253,11 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
/* Build the path in the caller buffer */
- acpi_ns_build_external_path(node, required_size, buffer->pointer);
+ status =
+ acpi_ns_build_external_path(node, required_size, buffer->pointer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
(char *)buffer->pointer, (u32) required_size));
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 89f3b2abfdc7..cf47805a7448 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -849,7 +849,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
if (irq < 0)
continue;
- if (irq >= ACPI_MAX_IRQS)
+ if (irq >= ARRAY_SIZE(acpi_irq_penalty))
continue;
if (used)
@@ -872,10 +872,12 @@ static int __init acpi_irq_penalty_update(char *str, int used)
*/
void acpi_penalize_isa_irq(int irq, int active)
{
- if (active)
- acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
- else
- acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (active)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
}
/*
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e36422a7122c..d3f0a62efcc1 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -123,7 +123,7 @@ struct acpi_processor_errata errata __read_mostly;
static int set_no_mwait(const struct dmi_system_id *id)
{
printk(KERN_NOTICE PREFIX "%s detected - "
- "disable mwait for CPU C-stetes\n", id->ident);
+ "disabling mwait for CPU C-states\n", id->ident);
idle_nomwait = 1;
return 0;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 283c08f5f4d4..cf5b1b7b684f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -41,7 +41,6 @@
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
-#include <linux/cpuidle.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0133af49cf06..80e32093e977 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -70,7 +70,7 @@ static DEFINE_MUTEX(performance_mutex);
* 0 -> cpufreq low level drivers initialized -> consider _PPC values
* 1 -> ignore _PPC totally -> forced by user through boot param
*/
-static unsigned int ignore_ppc = -1;
+static int ignore_ppc = -1;
module_param(ignore_ppc, uint, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
"limited by BIOS, this should help");
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index f61ebc679e66..d9063ea414e3 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -587,6 +587,9 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
} else {
temp_size_needed +=
acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
+ if (!temp_size_needed) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
}
} else {
/*
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index e7bf34a7b1d2..7dcb67e0b215 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -242,10 +242,12 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
{
acpi_status status = AE_OK;
- if (!required_length) {
- WARN_ON(1);
- return AE_ERROR;
+ /* Parameter validation */
+
+ if (!buffer || !required_length) {
+ return (AE_BAD_PARAMETER);
}
+
switch (buffer->length) {
case ACPI_NO_BUFFER:
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index c5c791a575c9..42609d3a8aa9 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -135,6 +135,10 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
obj_pointer = object->package.elements;
break;
+ /*
+ * These objects have a possible list of notify handlers.
+	 * A Device object may also have a GPE block.
+ */
case ACPI_TYPE_DEVICE:
if (object->device.gpe_block) {
@@ -142,9 +146,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
gpe_block);
}
- /* Walk the handler list for this device */
+ /*lint -fallthrough */
+
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* Walk the notify handler list for this object */
- handler_desc = object->device.handler;
+ handler_desc = object->common_notify.handler;
while (handler_desc) {
next_desc = handler_desc->address_space.next;
acpi_ut_remove_reference(handler_desc);
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index e25484495e65..916eff399eb3 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -425,6 +425,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
acpi_size * obj_length)
{
acpi_size length;
+ acpi_size size;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
@@ -484,10 +485,14 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
* Get the actual length of the full pathname to this object.
* The reference will be converted to the pathname to the object
*/
- length +=
- ACPI_ROUND_UP_TO_NATIVE_WORD
- (acpi_ns_get_pathname_length
- (internal_object->reference.node));
+ size =
+ acpi_ns_get_pathname_length(internal_object->
+ reference.node);
+ if (!size) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ length += ACPI_ROUND_UP_TO_NATIVE_WORD(size);
break;
default:
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index c33b1c6e93b1..cfe2c833474d 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -347,7 +347,7 @@ struct acpi_buffer *out)
strcpy(method, "WQ");
strncat(method, block->object_id, 2);
- status = acpi_evaluate_object(handle, method, NULL, out);
+ status = acpi_evaluate_object(handle, method, &input, out);
/*
* If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 338b0b4dce9c..46f507531177 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -150,6 +150,9 @@ struct agp_bridge_data {
char minor_version;
struct list_head list;
u32 apbase_config;
+ /* list of agp_memory mapped to the aperture */
+ struct list_head mapped_list;
+ spinlock_t mapped_lock;
};
#define KB(x) ((x) * 1024)
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 1ffb381130c3..31dcd9142d54 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -110,7 +110,8 @@ static int ali_configure(void)
nlvm_addr+= agp_bridge->gart_bus_addr;
nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
- printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr);
+ dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
+ nlvm_addr);
}
#endif
@@ -315,8 +316,8 @@ static int __devinit agp_ali_probe(struct pci_dev *pdev,
goto found;
}
- printk(KERN_ERR PFX "Unsupported ALi chipset (device id: %04x)\n",
- pdev->device);
+ dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x])\n",
+ pdev->vendor, pdev->device);
return -ENODEV;
@@ -361,8 +362,7 @@ found:
bridge->driver = &ali_generic_bridge;
}
- printk(KERN_INFO PFX "Detected ALi %s chipset\n",
- devs[j].chipset_name);
+ dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
/* Fill in the mode register */
pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 39a0718bc616..e280531843be 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -419,8 +419,8 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
return -ENODEV;
j = ent - agp_amdk7_pci_table;
- printk(KERN_INFO PFX "Detected AMD %s chipset\n",
- amd_agp_device_ids[j].chipset_name);
+ dev_info(&pdev->dev, "AMD %s chipset\n",
+ amd_agp_device_ids[j].chipset_name);
bridge = agp_alloc_bridge();
if (!bridge)
@@ -442,7 +442,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
while (!cap_ptr) {
gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
if (!gfxcard) {
- printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
+ dev_info(&pdev->dev, "no AGP VGA controller\n");
return -ENODEV;
}
cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
@@ -453,7 +453,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
(if necessary at all). */
if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
agp_bridge->flags |= AGP_ERRATA_1X;
- printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
+ dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
}
pci_dev_put(gfxcard);
}
@@ -469,7 +469,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
agp_bridge->flags = AGP_ERRATA_FASTWRITES;
agp_bridge->flags |= AGP_ERRATA_SBA;
agp_bridge->flags |= AGP_ERRATA_1X;
- printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
+ dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
}
}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 481ffe87c716..7495c522d8e4 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -34,6 +34,7 @@
static struct resource *aperture_resource;
static int __initdata agp_try_unsupported = 1;
+static int agp_bridges_found;
static void amd64_tlbflush(struct agp_memory *temp)
{
@@ -293,12 +294,13 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
* so let double check that order, and lets trust the AMD NB settings
*/
if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
- printk(KERN_INFO "Aperture size %u MB is not right, using settings from NB\n",
- 32 << order);
+ dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
+ 32 << order);
order = nb_order;
}
- printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order);
+ dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
+ aper, 32 << order);
if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
return -1;
@@ -319,10 +321,10 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
for (i = 0; i < num_k8_northbridges; i++) {
struct pci_dev *dev = k8_northbridges[i];
if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
- printk(KERN_ERR PFX "No usable aperture found.\n");
+ dev_err(&dev->dev, "no usable aperture found\n");
#ifdef __x86_64__
/* should port this to i386 */
- printk(KERN_ERR PFX "Consider rebooting with iommu=memaper=2 to get a good aperture.\n");
+ dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
#endif
return -1;
}
@@ -345,14 +347,14 @@ static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data
default: revstring="??"; break;
}
- printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring);
+ dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
/*
* Work around errata.
* Chips before B2 stepping incorrectly reporting v3.5
*/
if (pdev->revision < 0x13) {
- printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n");
+ dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
bridge->major_version = 3;
bridge->minor_version = 0;
}
@@ -375,11 +377,11 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
struct pci_dev *dev1;
int i;
unsigned size = amd64_fetch_size();
- printk(KERN_INFO "Setting up ULi AGP.\n");
+
+ dev_info(&pdev->dev, "setting up ULi AGP\n");
dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0));
if (dev1 == NULL) {
- printk(KERN_INFO PFX "Detected a ULi chipset, "
- "but could not fine the secondary device.\n");
+ dev_info(&pdev->dev, "can't find ULi secondary device\n");
return -ENODEV;
}
@@ -388,7 +390,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
break;
if (i == ARRAY_SIZE(uli_sizes)) {
- printk(KERN_INFO PFX "No ULi size found for %d\n", size);
+ dev_info(&pdev->dev, "no ULi size found for %d\n", size);
return -ENODEV;
}
@@ -433,13 +435,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
int i;
unsigned size = amd64_fetch_size();
- printk(KERN_INFO PFX "Setting up Nforce3 AGP.\n");
+ dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
if (dev1 == NULL) {
- printk(KERN_INFO PFX "agpgart: Detected an NVIDIA "
- "nForce3 chipset, but could not find "
- "the secondary device.\n");
+ dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
return -ENODEV;
}
@@ -448,7 +448,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
break;
if (i == ARRAY_SIZE(nforce3_sizes)) {
- printk(KERN_INFO PFX "No NForce3 size found for %d\n", size);
+ dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
return -ENODEV;
}
@@ -462,7 +462,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
/* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) ) {
- printk(KERN_INFO PFX "aperture base > 4G\n");
+ dev_info(&pdev->dev, "aperture base > 4G\n");
return -ENODEV;
}
@@ -489,6 +489,7 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
+ int err;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
@@ -504,7 +505,8 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
amd8151_init(pdev, bridge);
} else {
- printk(KERN_INFO PFX "Detected AGP bridge %x\n", pdev->devfn);
+ dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
}
bridge->driver = &amd_8151_driver;
@@ -536,7 +538,12 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, bridge);
- return agp_add_bridge(bridge);
+ err = agp_add_bridge(bridge);
+ if (err < 0)
+ return err;
+
+ agp_bridges_found++;
+ return 0;
}
static void __devexit agp_amd64_remove(struct pci_dev *pdev)
@@ -713,7 +720,11 @@ int __init agp_amd64_init(void)
if (agp_off)
return -EINVAL;
- if (pci_register_driver(&agp_amd64_pci_driver) < 0) {
+ err = pci_register_driver(&agp_amd64_pci_driver);
+ if (err < 0)
+ return err;
+
+ if (agp_bridges_found == 0) {
struct pci_dev *dev;
if (!agp_try_unsupported && !agp_try_unsupported_boot) {
printk(KERN_INFO PFX "No supported AGP bridge found.\n");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3a4566c0d84f..6ecbcafb34b1 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -486,8 +486,8 @@ static int __devinit agp_ati_probe(struct pci_dev *pdev,
goto found;
}
- printk(KERN_ERR PFX
- "Unsupported Ati chipset (device id: %04x)\n", pdev->device);
+ dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x])\n",
+ pdev->vendor, pdev->device);
return -ENODEV;
found:
@@ -500,8 +500,7 @@ found:
bridge->driver = &ati_generic_bridge;
- printk(KERN_INFO PFX "Detected Ati %s chipset\n",
- devs[j].chipset_name);
+ dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name);
/* Fill in the mode register */
pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 1ec87104e68c..3a3cc03d401c 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -144,7 +144,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
void *addr = bridge->driver->agp_alloc_page(bridge);
if (!addr) {
- printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
+ dev_err(&bridge->dev->dev,
+ "can't get memory for scratch page\n");
return -ENOMEM;
}
@@ -155,13 +156,13 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
size_value = bridge->driver->fetch_size();
if (size_value == 0) {
- printk(KERN_ERR PFX "unable to determine aperture size.\n");
+ dev_err(&bridge->dev->dev, "can't determine aperture size\n");
rc = -EINVAL;
goto err_out;
}
if (bridge->driver->create_gatt_table(bridge)) {
- printk(KERN_ERR PFX
- "unable to get memory for graphics translation table.\n");
+ dev_err(&bridge->dev->dev,
+ "can't get memory for graphics translation table\n");
rc = -ENOMEM;
goto err_out;
}
@@ -169,7 +170,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
bridge->key_list = vmalloc(PAGE_SIZE * 4);
if (bridge->key_list == NULL) {
- printk(KERN_ERR PFX "error allocating memory for key lists.\n");
+ dev_err(&bridge->dev->dev,
+ "can't allocate memory for key lists\n");
rc = -ENOMEM;
goto err_out;
}
@@ -179,10 +181,12 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
memset(bridge->key_list, 0, PAGE_SIZE * 4);
if (bridge->driver->configure()) {
- printk(KERN_ERR PFX "error configuring host chipset.\n");
+ dev_err(&bridge->dev->dev, "error configuring host chipset\n");
rc = -EINVAL;
goto err_out;
}
+ INIT_LIST_HEAD(&bridge->mapped_list);
+ spin_lock_init(&bridge->mapped_lock);
return 0;
@@ -269,25 +273,27 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
/* Grab reference on the chipset driver. */
if (!try_module_get(bridge->driver->owner)) {
- printk (KERN_INFO PFX "Couldn't lock chipset driver.\n");
+ dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
return -EINVAL;
}
error = agp_backend_initialize(bridge);
if (error) {
- printk (KERN_INFO PFX "agp_backend_initialize() failed.\n");
+ dev_info(&bridge->dev->dev,
+ "agp_backend_initialize() failed\n");
goto err_out;
}
if (list_empty(&agp_bridges)) {
error = agp_frontend_initialize();
if (error) {
- printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n");
+ dev_info(&bridge->dev->dev,
+ "agp_frontend_initialize() failed\n");
goto frontend_err;
}
- printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
- bridge->driver->fetch_size(), bridge->gart_bus_addr);
+ dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
+ bridge->driver->fetch_size(), bridge->gart_bus_addr);
}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index dd370be00d68..10d6cbd7c05e 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -446,6 +446,10 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
curr->is_bound = true;
curr->pg_start = pg_start;
+ spin_lock(&agp_bridge->mapped_lock);
+ list_add(&curr->mapped_list, &agp_bridge->mapped_list);
+ spin_unlock(&agp_bridge->mapped_lock);
+
return 0;
}
EXPORT_SYMBOL(agp_bind_memory);
@@ -478,10 +482,34 @@ int agp_unbind_memory(struct agp_memory *curr)
curr->is_bound = false;
curr->pg_start = 0;
+ spin_lock(&curr->bridge->mapped_lock);
+ list_del(&curr->mapped_list);
+ spin_unlock(&curr->bridge->mapped_lock);
return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
+/**
+ * agp_rebind_memory - Rewrite the entire GATT, useful on resume
+ */
+int agp_rebind_memory(void)
+{
+ struct agp_memory *curr;
+ int ret_val = 0;
+
+ spin_lock(&agp_bridge->mapped_lock);
+ list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
+ ret_val = curr->bridge->driver->insert_memory(curr,
+ curr->pg_start,
+ curr->type);
+ if (ret_val != 0)
+ break;
+ }
+ spin_unlock(&agp_bridge->mapped_lock);
+ return ret_val;
+}
+EXPORT_SYMBOL(agp_rebind_memory);
+
/* End - Routines for handling swapping of agp_memory into the GATT */
@@ -788,8 +816,8 @@ void agp_device_command(u32 bridge_agpstat, bool agp_v3)
if (!agp)
continue;
- printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
- agp_v3 ? 3 : 2, pci_name(device), mode);
+ dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
+ agp_v3 ? 3 : 2, mode);
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
}
}
@@ -817,10 +845,8 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
get_agp_version(agp_bridge);
- printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
- agp_bridge->major_version,
- agp_bridge->minor_version,
- pci_name(agp_bridge->dev));
+ dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+ agp_bridge->major_version, agp_bridge->minor_version);
pci_read_config_dword(agp_bridge->dev,
agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
@@ -849,8 +875,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
pci_write_config_dword(bridge->dev,
bridge->capndx+AGPCTRL, temp);
- printk(KERN_INFO PFX "Device is in legacy mode,"
- " falling back to 2.x\n");
+ dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
}
}
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 290a1cf63925..043e36628d6d 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -32,8 +32,8 @@
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
-#define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40
-#define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
#define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02
#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
@@ -55,7 +55,7 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB)
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
@@ -161,7 +161,7 @@ static int intel_i810_fetch_size(void)
values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- printk(KERN_WARNING PFX "i810 is disabled\n");
+ dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
return 0;
}
if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -193,7 +193,8 @@ static int intel_i810_configure(void)
intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
- printk(KERN_ERR PFX "Unable to remap memory.\n");
+ dev_err(&intel_private.pcidev->dev,
+ "can't remap memory\n");
return -ENOMEM;
}
}
@@ -201,7 +202,8 @@ static int intel_i810_configure(void)
if ((readl(intel_private.registers+I810_DRAM_CTL)
& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
/* This will need to be dynamically assigned */
- printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
intel_private.num_dcache_entries = 1024;
}
pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
@@ -500,8 +502,8 @@ static void intel_i830_init_gtt_entries(void)
size = 1024 + 512;
break;
default:
- printk(KERN_INFO PFX "Unknown page table size, "
- "assuming 512KB\n");
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
size = 512;
}
size += 4; /* add in BIOS popup space */
@@ -515,8 +517,8 @@ static void intel_i830_init_gtt_entries(void)
size = 2048;
break;
default:
- printk(KERN_INFO PFX "Unknown page table size 0x%x, "
- "assuming 512KB\n",
+ dev_info(&agp_bridge->dev->dev,
+ "unknown page table size 0x%x, assuming 512KB\n",
(gmch_ctrl & G33_PGETBL_SIZE_MASK));
size = 512;
}
@@ -627,11 +629,11 @@ static void intel_i830_init_gtt_entries(void)
}
}
if (gtt_entries > 0)
- printk(KERN_INFO PFX "Detected %dK %s memory.\n",
+ dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
gtt_entries / KB(1), local ? "local" : "stolen");
else
- printk(KERN_INFO PFX
- "No pre-allocated video memory detected.\n");
+ dev_info(&agp_bridge->dev->dev,
+ "no pre-allocated video memory detected\n");
gtt_entries /= KB(4);
intel_private.gtt_entries = gtt_entries;
@@ -801,10 +803,12 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
num_entries = A_SIZE_FIX(temp)->num_entries;
if (pg_start < intel_private.gtt_entries) {
- printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
- printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
goto out_err;
}
@@ -851,7 +855,8 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
return 0;
if (pg_start < intel_private.gtt_entries) {
- printk(KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
return -EINVAL;
}
@@ -957,7 +962,7 @@ static void intel_i9xx_setup_flush(void)
if (intel_private.ifp_resource.start) {
intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
if (!intel_private.i9xx_flush_page)
- printk(KERN_INFO "unable to ioremap flush page - no chipset flushing");
+ dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
}
}
@@ -1028,10 +1033,12 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
num_entries = A_SIZE_FIX(temp)->num_entries;
if (pg_start < intel_private.gtt_entries) {
- printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
- printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
goto out_err;
}
@@ -1078,7 +1085,8 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
return 0;
if (pg_start < intel_private.gtt_entries) {
- printk(KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
return -EINVAL;
}
@@ -1182,7 +1190,7 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
switch (agp_bridge->dev->device) {
- case PCI_DEVICE_ID_INTEL_IGD_HB:
+ case PCI_DEVICE_ID_INTEL_GM45_HB:
case PCI_DEVICE_ID_INTEL_IGD_E_HB:
case PCI_DEVICE_ID_INTEL_Q45_HB:
case PCI_DEVICE_ID_INTEL_G45_HB:
@@ -1379,7 +1387,7 @@ static int intel_815_configure(void)
/* the Intel 815 chipset spec. says that bits 29-31 in the
* ATTBASE register are reserved -> try not to write them */
if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
- printk(KERN_EMERG PFX "gatt bus addr too high");
+ dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high");
return -EINVAL;
}
@@ -2145,8 +2153,8 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0,
- "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+ "Mobile Intel? GM45 Express", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
"Intel Integrated Graphics Device", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
@@ -2191,8 +2199,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (intel_agp_chipsets[i].name == NULL) {
if (cap_ptr)
- printk(KERN_WARNING PFX "Unsupported Intel chipset"
- "(device id: %04x)\n", pdev->device);
+ dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -2200,9 +2208,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (bridge->driver == NULL) {
/* bridge has no AGP and no IGD detected */
if (cap_ptr)
- printk(KERN_WARNING PFX "Failed to find bridge device "
- "(chip_id: %04x)\n",
- intel_agp_chipsets[i].gmch_chip_id);
+ dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
+ intel_agp_chipsets[i].gmch_chip_id);
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -2211,8 +2218,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
bridge->capndx = cap_ptr;
bridge->dev_private_data = &intel_private;
- printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n",
- intel_agp_chipsets[i].name);
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
/*
* The following fixes the case where the BIOS has "forgotten" to
@@ -2222,7 +2228,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
r = &pdev->resource[0];
if (!r->start && r->end) {
if (pci_assign_resource(pdev, 0)) {
- printk(KERN_ERR PFX "could not assign resource 0\n");
+ dev_err(&pdev->dev, "can't assign resource 0\n");
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -2234,7 +2240,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
* 20030610 - hamish@zot.org
*/
if (pci_enable_device(pdev)) {
- printk(KERN_ERR PFX "Unable to Enable PCI device\n");
+ dev_err(&pdev->dev, "can't enable PCI device\n");
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -2266,6 +2272,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
static int agp_intel_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+ int ret_val;
pci_restore_state(pdev);
@@ -2293,6 +2300,10 @@ static int agp_intel_resume(struct pci_dev *pdev)
else if (bridge->driver == &intel_i965_driver)
intel_i915_configure();
+ ret_val = agp_rebind_memory();
+ if (ret_val != 0)
+ return ret_val;
+
return 0;
}
#endif
@@ -2343,7 +2354,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_G33_HB),
ID(PCI_DEVICE_ID_INTEL_Q35_HB),
ID(PCI_DEVICE_ID_INTEL_Q33_HB),
- ID(PCI_DEVICE_ID_INTEL_IGD_HB),
+ ID(PCI_DEVICE_ID_INTEL_GM45_HB),
ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
index 3f9ccde62377..c73385cc4b8a 100644
--- a/drivers/char/agp/isoch.c
+++ b/drivers/char/agp/isoch.c
@@ -153,7 +153,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
/* Check if this configuration has any chance of working */
if (tot_bw > target.maxbw) {
- printk(KERN_ERR PFX "isochronous bandwidth required "
+ dev_err(&td->dev, "isochronous bandwidth required "
"by AGP 3.0 devices exceeds that which is supported by "
"the AGP 3.0 bridge!\n");
ret = -ENODEV;
@@ -188,7 +188,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
/* Exit if the minimal ISOCH_N allocation among the masters is more
* than the target can handle. */
if (tot_n > target.n) {
- printk(KERN_ERR PFX "number of isochronous "
+ dev_err(&td->dev, "number of isochronous "
"transactions per period required by AGP 3.0 devices "
"exceeds that which is supported by the AGP 3.0 "
"bridge!\n");
@@ -229,7 +229,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
/* Exit if the minimal RQ needs of the masters exceeds what the target
* can provide. */
if (tot_rq > rq_isoch) {
- printk(KERN_ERR PFX "number of request queue slots "
+ dev_err(&td->dev, "number of request queue slots "
"required by the isochronous bandwidth requested by "
"AGP 3.0 devices exceeds the number provided by the "
"AGP 3.0 bridge!\n");
@@ -359,8 +359,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
case 0x0001: /* Unclassified device */
/* Don't know what this is, but log it for investigation. */
if (mcapndx != 0) {
- printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n",
- dev->vendor, dev->device);
+ dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
+ pci_name(dev),
+ dev->vendor, dev->device);
}
continue;
@@ -407,17 +408,18 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
}
if (mcapndx == 0) {
- printk(KERN_ERR PFX "woah! Non-AGP device "
- "found on the secondary bus of an AGP 3.5 bridge!\n");
+ dev_err(&td->dev, "woah! Non-AGP device %s on "
+ "secondary bus of AGP 3.5 bridge!\n",
+ pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
if (mmajor < 3) {
- printk(KERN_ERR PFX "woah! AGP 2.0 device "
- "found on the secondary bus of an AGP 3.5 "
- "bridge operating with AGP 3.0 electricals!\n");
+ dev_err(&td->dev, "woah! AGP 2.0 device %s on "
+ "secondary bus of AGP 3.5 bridge operating "
+ "with AGP 3.0 electricals!\n", pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
@@ -427,10 +429,10 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
if (((mstatus >> 3) & 0x1) == 0) {
- printk(KERN_ERR PFX "woah! AGP 3.x device "
- "not operating in AGP 3.x mode found on the "
- "secondary bus of an AGP 3.5 bridge operating "
- "with AGP 3.0 electricals!\n");
+ dev_err(&td->dev, "woah! AGP 3.x device %s not "
+ "operating in AGP 3.x mode on secondary bus "
+ "of AGP 3.5 bridge operating with AGP 3.0 "
+ "electricals!\n", pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
@@ -444,9 +446,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
if (isoch) {
ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
if (ret) {
- printk(KERN_INFO PFX "Something bad happened setting "
- "up isochronous xfers. Falling back to "
- "non-isochronous xfer mode.\n");
+ dev_info(&td->dev, "something bad happened setting "
+ "up isochronous xfers; falling back to "
+ "non-isochronous xfer mode\n");
} else {
goto free_and_exit;
}
@@ -466,4 +468,3 @@ free_and_exit:
get_out:
return ret;
}
-
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index b6791846809f..2587ef96a960 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -79,10 +79,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
u32 command;
int rate;
- printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
- agp_bridge->major_version,
- agp_bridge->minor_version,
- pci_name(agp_bridge->dev));
+ dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+ agp_bridge->major_version, agp_bridge->minor_version);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
command = agp_collect_device_status(bridge, mode, command);
@@ -94,8 +92,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
if (!agp)
continue;
- printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n",
- pci_name(device), rate);
+ dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
+ pci_name(device), rate);
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
@@ -105,7 +103,7 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
* cannot be configured
*/
if (device->device == bridge->dev->device) {
- printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n");
+ dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
msleep(10);
}
}
@@ -190,7 +188,8 @@ static int __devinit agp_sis_probe(struct pci_dev *pdev,
return -ENODEV;
- printk(KERN_INFO PFX "Detected SiS chipset - id:%i\n", pdev->device);
+ dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
@@ -242,7 +241,7 @@ static struct pci_device_id agp_sis_pci_table[] = {
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
- .device = PCI_DEVICE_ID_SI_5591_AGP,
+ .device = PCI_DEVICE_ID_SI_5591,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 0e054c134490..2fb27fe4c10c 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -241,7 +241,8 @@ static void serverworks_tlbflush(struct agp_memory *temp)
while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n");
+ dev_err(&serverworks_private.svrwrks_dev->dev,
+ "TLB post flush took more than 3 seconds\n");
break;
}
}
@@ -251,7 +252,8 @@ static void serverworks_tlbflush(struct agp_memory *temp)
while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n");
+ dev_err(&serverworks_private.svrwrks_dev->dev,
+ "TLB Dir flush took more than 3 seconds\n");
break;
}
}
@@ -271,7 +273,7 @@ static int serverworks_configure(void)
temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
if (!serverworks_private.registers) {
- printk (KERN_ERR PFX "Unable to ioremap() memory.\n");
+ dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
return -ENOMEM;
}
@@ -451,7 +453,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
switch (pdev->device) {
case 0x0006:
- printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n");
+ dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
return -ENODEV;
case PCI_DEVICE_ID_SERVERWORKS_HE:
@@ -461,8 +463,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
default:
if (cap_ptr)
- printk(KERN_ERR PFX "Unsupported Serverworks chipset "
- "(device id: %04x)\n", pdev->device);
+ dev_err(&pdev->dev, "unsupported Serverworks chipset "
+ "[%04x/%04x]\n", pdev->vendor, pdev->device);
return -ENODEV;
}
@@ -470,8 +472,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
PCI_DEVFN(0, 1));
if (!bridge_dev) {
- printk(KERN_INFO PFX "Detected a Serverworks chipset "
- "but could not find the secondary device.\n");
+ dev_info(&pdev->dev, "can't find secondary device\n");
return -ENODEV;
}
@@ -482,8 +483,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
if (temp2 != 0) {
- printk(KERN_INFO PFX "Detected 64 bit aperture address, "
- "but top bits are not zero. Disabling agp\n");
+ dev_info(&pdev->dev, "64 bit aperture address, "
+ "but top bits are not zero; disabling AGP\n");
return -ENODEV;
}
serverworks_private.mm_addr_ofs = 0x18;
@@ -495,8 +496,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
pci_read_config_dword(pdev,
serverworks_private.mm_addr_ofs + 4, &temp2);
if (temp2 != 0) {
- printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
- "but top bits are not zero. Disabling agp\n");
+ dev_info(&pdev->dev, "64 bit MMIO address, but top "
+ "bits are not zero; disabling AGP\n");
return -ENODEV;
}
}
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index d2fa3cfca02a..eef72709ec53 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -46,8 +46,8 @@ static int uninorth_fetch_size(void)
break;
if (i == agp_bridge->driver->num_aperture_sizes) {
- printk(KERN_ERR PFX "Invalid aperture size, using"
- " default\n");
+ dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
+ "using default\n");
size = 0;
aperture = NULL;
}
@@ -108,8 +108,8 @@ static int uninorth_configure(void)
current_size = A_SIZE_32(agp_bridge->current_size);
- printk(KERN_INFO PFX "configuring for size idx: %d\n",
- current_size->size_value);
+ dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n",
+ current_size->size_value);
/* aperture size and gatt addr */
pci_write_config_dword(agp_bridge->dev,
@@ -197,8 +197,9 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i) {
if (gp[i]) {
- printk("u3_insert_memory: entry 0x%x occupied (%x)\n",
- i, gp[i]);
+ dev_info(&agp_bridge->dev->dev,
+ "u3_insert_memory: entry 0x%x occupied (%x)\n",
+ i, gp[i]);
return -EBUSY;
}
}
@@ -276,8 +277,8 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
&scratch);
} while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
- printk(KERN_ERR PFX "failed to write UniNorth AGP"
- " command register\n");
+ dev_err(&bridge->dev->dev, "can't write UniNorth AGP "
+ "command register\n");
if (uninorth_rev >= 0x30) {
/* This is an AGP V3 */
@@ -330,8 +331,8 @@ static int agp_uninorth_suspend(struct pci_dev *pdev)
pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
if (!(cmd & PCI_AGP_COMMAND_AGP))
continue;
- printk("uninorth-agp: disabling AGP on device %s\n",
- pci_name(device));
+ dev_info(&pdev->dev, "disabling AGP on device %s\n",
+ pci_name(device));
cmd &= ~PCI_AGP_COMMAND_AGP;
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
}
@@ -341,8 +342,7 @@ static int agp_uninorth_suspend(struct pci_dev *pdev)
pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
bridge->dev_private_data = (void *)(long)cmd;
if (cmd & PCI_AGP_COMMAND_AGP) {
- printk("uninorth-agp: disabling AGP on bridge %s\n",
- pci_name(pdev));
+ dev_info(&pdev->dev, "disabling AGP on bridge\n");
cmd &= ~PCI_AGP_COMMAND_AGP;
pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
}
@@ -591,14 +591,14 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
/* probe for known chipsets */
for (j = 0; devs[j].chipset_name != NULL; ++j) {
if (pdev->device == devs[j].device_id) {
- printk(KERN_INFO PFX "Detected Apple %s chipset\n",
- devs[j].chipset_name);
+ dev_info(&pdev->dev, "Apple %s chipset\n",
+ devs[j].chipset_name);
goto found;
}
}
- printk(KERN_ERR PFX "Unsupported Apple chipset (device id: %04x).\n",
- pdev->device);
+ dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
return -ENODEV;
found:
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 02aac104842d..fd64137b1ab9 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -322,11 +322,10 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
hp->tty = tty;
- if (hp->ops->notifier_add)
- rc = hp->ops->notifier_add(hp, hp->data);
-
spin_unlock_irqrestore(&hp->lock, flags);
+ if (hp->ops->notifier_add)
+ rc = hp->ops->notifier_add(hp, hp->data);
/*
* If the notifier fails we return an error. The tty layer
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index f7feae4ebb5e..128202e18fc9 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -31,6 +31,7 @@
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
+#include <asm/i387.h>
#define PFX KBUILD_MODNAME ": "
@@ -67,16 +68,23 @@ enum {
* Another possible performance boost may come from simply buffering
* until we have 4 bytes, thus returning a u32 at a time,
* instead of the current u8-at-a-time.
+ *
+ * Padlock instructions can generate a spurious DNA fault, so
+ * we have to call them in the context of irq_ts_save/restore()
*/
static inline u32 xstore(u32 *addr, u32 edx_in)
{
u32 eax_out;
+ int ts_state;
+
+ ts_state = irq_ts_save();
asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
:"=m"(*addr), "=a"(eax_out)
:"D"(addr), "d"(edx_in));
+ irq_ts_restore(ts_state);
return eax_out;
}
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index b1414507997c..3a23e7694d55 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -29,7 +29,6 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/uaccess.h>
-#include <linux/version.h>
#include "tty.h"
#include "network.h"
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index d9799e2bcfbf..f53d4d00faf0 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -78,7 +78,6 @@
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <asm/current.h>
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 509c89ac5bd3..08911ed66494 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -47,7 +47,6 @@
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 0e6866fe0f96..a27160ba21d7 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -2496,45 +2496,25 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
}
/**
- * tiocswinsz - implement window size set ioctl
- * @tty; tty
- * @arg: user buffer for result
+ * tty_do_resize - resize event
+ * @tty: tty being resized
+ * @real_tty: real tty (if using a pty/tty pair)
+ * @rows: rows (character)
+ * @cols: cols (character)
*
- * Copies the user idea of the window size to the kernel. Traditionally
- * this is just advisory information but for the Linux console it
- * actually has driver level meaning and triggers a VC resize.
- *
- * Locking:
- * Called function use the console_sem is used to ensure we do
- * not try and resize the console twice at once.
- * The tty->termios_mutex is used to ensure we don't double
- * resize and get confused. Lock order - tty->termios_mutex before
- * console sem
+ *	Update the termios variables and send the necessary signals to
+ *	perform a terminal resize correctly
*/
-static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
- struct winsize __user *arg)
+int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize *ws)
{
- struct winsize tmp_ws;
struct pid *pgrp, *rpgrp;
unsigned long flags;
- if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
- return -EFAULT;
-
mutex_lock(&tty->termios_mutex);
- if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg)))
+ if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
goto done;
-
-#ifdef CONFIG_VT
- if (tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) {
- if (vc_lock_resize(tty->driver_data, tmp_ws.ws_col,
- tmp_ws.ws_row)) {
- mutex_unlock(&tty->termios_mutex);
- return -ENXIO;
- }
- }
-#endif
/* Get the PID values and reference them so we can
avoid holding the tty ctrl lock while sending signals */
spin_lock_irqsave(&tty->ctrl_lock, flags);
@@ -2550,14 +2530,42 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
put_pid(pgrp);
put_pid(rpgrp);
- tty->winsize = tmp_ws;
- real_tty->winsize = tmp_ws;
+ tty->winsize = *ws;
+ real_tty->winsize = *ws;
done:
mutex_unlock(&tty->termios_mutex);
return 0;
}
/**
+ * tiocswinsz - implement window size set ioctl
+ *	@tty: tty
+ * @arg: user buffer for result
+ *
+ * Copies the user idea of the window size to the kernel. Traditionally
+ * this is just advisory information but for the Linux console it
+ * actually has driver level meaning and triggers a VC resize.
+ *
+ * Locking:
+ *	Driver dependent. The default do_resize method takes the
+ * tty termios mutex and ctrl_lock. The console takes its own lock
+ * then calls into the default method.
+ */
+
+static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize __user *arg)
+{
+ struct winsize tmp_ws;
+ if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
+ return -EFAULT;
+
+ if (tty->ops->resize)
+ return tty->ops->resize(tty, real_tty, &tmp_ws);
+ else
+ return tty_do_resize(tty, real_tty, &tmp_ws);
+}
+
+/**
* tioccons - allow admin to move logical console
* @file: the file to become console
*
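With this change window resizing becomes a tty driver operation: tiocswinsz() only copies the winsize from userspace and then calls the driver's resize method if one is provided, falling back to tty_do_resize(). A minimal sketch of a driver-side handler, assuming a hypothetical driver that only needs the generic bookkeeping plus a hardware notification (my_hw_set_winsize() is invented for illustration):

static int my_resize(struct tty_struct *tty, struct tty_struct *real_tty,
		     struct winsize *ws)
{
	int ret;

	/* let the core update termios winsize and signal the process groups */
	ret = tty_do_resize(tty, real_tty, ws);
	if (ret)
		return ret;

	/* hypothetical hardware notification, not a real API */
	my_hw_set_winsize(tty->driver_data, ws->ws_col, ws->ws_row);
	return 0;
}

static const struct tty_operations my_ops = {
	/* ... other operations ... */
	.resize		= my_resize,
};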
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1bc00c9d860d..60359c360912 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -803,7 +803,25 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
*/
#define VC_RESIZE_MAXCOL (32767)
#define VC_RESIZE_MAXROW (32767)
-int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
+
+/**
+ * vc_do_resize - resizing method for the tty
+ * @tty: tty being resized
+ * @real_tty: real tty (different from tty if a pty/tty pair)
+ * @vc: virtual console private data
+ * @cols: columns
+ * @lines: lines
+ *
+ * Resize a virtual console, clipping according to the actual constraints.
+ * If the caller passes a tty structure then update the termios winsize
+ * information and perform any neccessary signal handling.
+ *
+ * Caller must hold the console semaphore. Takes the termios mutex and
+ * ctrl_lock of the tty IFF a tty is passed.
+ */
+
+static int vc_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct vc_data *vc, unsigned int cols, unsigned int lines)
{
unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
unsigned int old_cols, old_rows, old_row_size, old_screen_size;
@@ -907,24 +925,15 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
gotoxy(vc, vc->vc_x, vc->vc_y);
save_cur(vc);
- if (vc->vc_tty) {
- struct winsize ws, *cws = &vc->vc_tty->winsize;
- struct pid *pgrp = NULL;
-
+ if (tty) {
+ /* Rewrite the requested winsize data with the actual
+ resulting sizes */
+ struct winsize ws;
memset(&ws, 0, sizeof(ws));
ws.ws_row = vc->vc_rows;
ws.ws_col = vc->vc_cols;
ws.ws_ypixel = vc->vc_scan_lines;
-
- spin_lock_irq(&vc->vc_tty->ctrl_lock);
- if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col))
- pgrp = get_pid(vc->vc_tty->pgrp);
- spin_unlock_irq(&vc->vc_tty->ctrl_lock);
- if (pgrp) {
- kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1);
- put_pid(pgrp);
- }
- *cws = ws;
+ tty_do_resize(tty, real_tty, &ws);
}
if (CON_IS_VISIBLE(vc))
@@ -932,14 +941,47 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
return err;
}
-int vc_lock_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
+/**
+ * vc_resize - resize a VT
+ * @vc: virtual console
+ * @cols: columns
+ * @rows: rows
+ *
+ * Resize a virtual console as seen from the console end of things. We
+ * use the common vc_do_resize method to update the structures. The
+ * caller must hold the console sem to protect console internals and
+ * vc->vc_tty
+ */
+
+int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
+{
+ return vc_do_resize(vc->vc_tty, vc->vc_tty, vc, cols, rows);
+}
+
+/**
+ * vt_resize - resize a VT
+ * @tty: tty to resize
+ * @real_tty: the real tty if a pty/tty pair
+ * @ws: winsize attributes
+ *
+ * Resize a virtual terminal. This is called by the tty layer as we
+ * register our own handler for resizing. The mutual helper does all
+ * the actual work.
+ *
+ * Takes the console sem and the called methods then take the tty
+ * termios_mutex and the tty ctrl_lock in that order.
+ */
+
+int vt_resize(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize *ws)
{
- int rc;
+ struct vc_data *vc = tty->driver_data;
+ int ret;
acquire_console_sem();
- rc = vc_resize(vc, cols, lines);
+ ret = vc_do_resize(tty, real_tty, vc, ws->ws_col, ws->ws_row);
release_console_sem();
- return rc;
+ return ret;
}
void vc_deallocate(unsigned int currcons)
@@ -2907,6 +2949,7 @@ static const struct tty_operations con_ops = {
.start = con_start,
.throttle = con_throttle,
.unthrottle = con_unthrottle,
+ .resize = vt_resize,
};
int __init vty_init(void)
@@ -4061,7 +4104,6 @@ EXPORT_SYMBOL(default_blu);
EXPORT_SYMBOL(update_region);
EXPORT_SYMBOL(redraw_screen);
EXPORT_SYMBOL(vc_resize);
-EXPORT_SYMBOL(vc_lock_resize);
EXPORT_SYMBOL(fg_console);
EXPORT_SYMBOL(console_blank_hook);
EXPORT_SYMBOL(console_blanked);
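Note that vc_resize() keeps its old prototype for console-side callers but no longer takes the console semaphore itself; callers are now expected to hold it, as the ioctl path below does. A rough usage sketch (cols/rows stand for whatever sizes the caller wants, error handling omitted):

	acquire_console_sem();
	vc->vc_resize_user = 1;		/* only if this is a user-initiated resize */
	vc_resize(vc, cols, rows);
	release_console_sem();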
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 3211afd9d57e..c904e9ad4a71 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -947,14 +947,16 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
get_user(cc, &vtsizes->v_cols))
ret = -EFAULT;
else {
+ acquire_console_sem();
for (i = 0; i < MAX_NR_CONSOLES; i++) {
vc = vc_cons[i].d;
if (vc) {
vc->vc_resize_user = 1;
- vc_lock_resize(vc_cons[i].d, cc, ll);
+ vc_resize(vc_cons[i].d, cc, ll);
}
}
+ release_console_sem();
}
break;
}
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 8bfee5fb7223..278c9857bcf5 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -74,7 +74,6 @@
* currently programmed in the FPGA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ba7b9a6b17a1..a4bec3f919aa 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
+ int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
if (unlikely(!ldev))
return 0;
+ /* Special case when user has set very strict latency requirement */
+ if (unlikely(latency_req == 0)) {
+ ladder_do_selection(ldev, last_idx, 0);
+ return 0;
+ }
+
last_state = &ldev->states[last_idx];
if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
/* consider promotion */
if (last_idx < dev->state_count - 1 &&
last_residency > last_state->threshold.promotion_time &&
- dev->states[last_idx + 1].exit_latency <=
- pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+ dev->states[last_idx + 1].exit_latency <= latency_req) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -92,7 +98,19 @@ static int ladder_select_state(struct cpuidle_device *dev)
}
/* consider demotion */
- if (last_idx > 0 &&
+ if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+ dev->states[last_idx].exit_latency > latency_req) {
+ int i;
+
+ for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+ if (dev->states[i].exit_latency <= latency_req)
+ break;
+ }
+ ladder_do_selection(ldev, last_idx, i);
+ return i;
+ }
+
+ if (last_idx > CPUIDLE_DRIVER_STATE_START &&
last_residency < last_state->threshold.demotion_time) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
@@ -117,7 +135,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
struct ladder_device_state *lstate;
struct cpuidle_state *state;
- ldev->last_state_idx = 0;
+ ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
for (i = 0; i < dev->state_count; i++) {
state = &dev->states[i];
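Both governors now sample the PM QoS cpu/dma latency requirement once per invocation and treat a value of 0 as "stay in the shallowest state". For context, a driver that temporarily cannot tolerate deep C-state exit latency would register such a requirement roughly as follows (a sketch assuming the pm_qos_params interface of this kernel series; the name string and the 20 us bound are arbitrary):

#include <linux/pm_qos_params.h>

	/* ask cpuidle to avoid states with more than 20 us exit latency */
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "my-driver", 20);

	/* ... latency critical section ... */

	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "my-driver");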
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 78d77c5dc35c..8d7cf3f31450 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static int menu_select(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
+ int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
int i;
+ /* Special case when user has set very strict latency requirement */
+ if (unlikely(latency_req == 0)) {
+ data->last_state_idx = 0;
+ return 0;
+ }
+
/* determine the expected residency time */
data->expected_us =
(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
/* find the deepest idle state that satisfies our constraints */
- for (i = 1; i < dev->state_count; i++) {
+ for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
struct cpuidle_state *s = &dev->states[i];
if (s->target_residency > data->expected_us)
break;
if (s->target_residency > data->predicted_us)
break;
- if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+ if (s->exit_latency > latency_req)
break;
}
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
- unsigned int measured_us =
- cpuidle_get_last_residency(dev) + data->elapsed_us;
+ unsigned int last_idle_us = cpuidle_get_last_residency(dev);
struct cpuidle_state *target = &dev->states[last_idx];
+ unsigned int measured_us;
/*
* Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
* for one full standard timer tick. However, be aware that this
* could potentially result in a suboptimal state transition.
*/
- if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
- measured_us = USEC_PER_SEC / HZ;
+ if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
+ last_idle_us = USEC_PER_SEC / HZ;
+
+ /*
+ * measured_us and elapsed_us are the cumulative idle time, since the
+ * last time we were woken out of idle by an interrupt.
+ */
+ if (data->elapsed_us <= data->elapsed_us + last_idle_us)
+ measured_us = data->elapsed_us + last_idle_us;
+ else
+ measured_us = -1;
+
+ /* Predict time until next break event */
+ data->predicted_us = max(measured_us, data->last_measured_us);
- /* Predict time remaining until next break event */
- if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
- data->predicted_us = max(measured_us, data->last_measured_us);
+ if (last_idle_us + BREAK_FUZZ <
+ data->expected_us - target->exit_latency) {
data->last_measured_us = measured_us;
data->elapsed_us = 0;
} else {
- if (data->elapsed_us < data->elapsed_us + measured_us)
- data->elapsed_us = measured_us;
- else
- data->elapsed_us = -1;
- data->predicted_us = max(measured_us, data->last_measured_us);
+ data->elapsed_us = measured_us;
}
}
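The reworked menu_reflect() accumulates idle time across consecutive wakeups and saturates the sum instead of letting the unsigned counter wrap; only when the last sleep ended comfortably before the predicted break event does it fold the total into last_measured_us and reset the accumulator. The overflow guard in isolation, as a small stand-alone illustration of the same idiom:

	/* a + b wraps exactly when the result drops below a, so check that first */
	static unsigned int sat_add(unsigned int a, unsigned int b)
	{
		if (a <= a + b)
			return a + b;
		return (unsigned int)-1;	/* clamp instead of wrapping */
	}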
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 31a0e0b455b6..97b003839fb6 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -21,8 +21,8 @@ static int __init cpuidle_sysfs_setup(char *unused)
}
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
-static ssize_t show_available_governors(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_available_governors(struct sysdev_class *class,
+ char *buf)
{
ssize_t i = 0;
struct cpuidle_governor *tmp;
@@ -40,8 +40,8 @@ out:
return i;
}
-static ssize_t show_current_driver(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_current_driver(struct sysdev_class *class,
+ char *buf)
{
ssize_t ret;
@@ -55,8 +55,8 @@ static ssize_t show_current_driver(struct sys_device *dev,
return ret;
}
-static ssize_t show_current_governor(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_current_governor(struct sysdev_class *class,
+ char *buf)
{
ssize_t ret;
@@ -70,9 +70,8 @@ static ssize_t show_current_governor(struct sys_device *dev,
return ret;
}
-static ssize_t store_current_governor(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_current_governor(struct sysdev_class *class,
+ const char *buf, size_t count)
{
char gov_name[CPUIDLE_NAME_LEN];
int ret = -EINVAL;
@@ -104,8 +103,9 @@ static ssize_t store_current_governor(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL);
-static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
+static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL);
+static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor,
+ NULL);
static struct attribute *cpuclass_default_attrs[] = {
&attr_current_driver.attr,
@@ -113,9 +113,10 @@ static struct attribute *cpuclass_default_attrs[] = {
NULL
};
-static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL);
-static SYSDEV_ATTR(current_governor, 0644, show_current_governor,
- store_current_governor);
+static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors,
+ NULL);
+static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor,
+ store_current_governor);
static struct attribute *cpuclass_switch_attrs[] = {
&attr_available_governors.attr,
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 54a2a166e566..bf2917d197a0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
+#include <asm/i387.h>
#include "padlock.h"
/* Control word. */
@@ -141,6 +142,12 @@ static inline void padlock_reset_key(void)
asm volatile ("pushfl; popfl");
}
+/*
+ * While the padlock instructions don't use FP/SSE registers, they
+ * generate a spurious DNA fault when cr0.ts is '1'. These instructions
+ * should be used only inside the irq_ts_save/restore() context
+ */
+
static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
void *control_word)
{
@@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
+ int ts_state;
padlock_reset_key();
+
+ ts_state = irq_ts_save();
aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+ irq_ts_restore(ts_state);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
+ int ts_state;
padlock_reset_key();
+
+ ts_state = irq_ts_save();
aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+ irq_ts_restore(ts_state);
}
static struct crypto_alg aes_alg = {
@@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ int ts_state;
padlock_reset_key();
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
@@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ irq_ts_restore(ts_state);
return err;
}
@@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ int ts_state;
padlock_reset_key();
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
@@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
-
+ irq_ts_restore(ts_state);
return err;
}
@@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ int ts_state;
padlock_reset_key();
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
@@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ irq_ts_restore(ts_state);
return err;
}
@@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ int ts_state;
padlock_reset_key();
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ irq_ts_restore(ts_state);
return err;
}
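All PadLock entry points now use the same pattern around the REP XCRYPT/XSHA instructions: save and, if necessary, clear CR0.TS so the instruction does not raise the spurious device-not-available fault, run the operation, then restore TS. Schematically (my_rep_xcrypt() is a placeholder for the inline asm the driver actually issues):

	int ts_state;

	ts_state = irq_ts_save();		/* avoid the spurious DNA fault */
	my_rep_xcrypt(in, out, key, cword);	/* placeholder for the rep xcrypt asm */
	irq_ts_restore(ts_state);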
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 40d5680fa013..a7fbadebf623 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
+#include <asm/i387.h>
#include "padlock.h"
#define SHA1_DEFAULT_FALLBACK "sha1-generic"
@@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count)
* PadLock microcode needs it that big. */
char buf[128+16];
char *result = NEAREST_ALIGNED(buf);
+ int ts_state;
((uint32_t *)result)[0] = SHA1_H0;
((uint32_t *)result)[1] = SHA1_H1;
@@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count)
((uint32_t *)result)[3] = SHA1_H3;
((uint32_t *)result)[4] = SHA1_H4;
+ /* prevent taking the spurious DNA fault with padlock. */
+ ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: "+S"(in), "+D"(result)
: "c"(count), "a"(0));
+ irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}
@@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count)
* PadLock microcode needs it that big. */
char buf[128+16];
char *result = NEAREST_ALIGNED(buf);
+ int ts_state;
((uint32_t *)result)[0] = SHA256_H0;
((uint32_t *)result)[1] = SHA256_H1;
@@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count)
((uint32_t *)result)[6] = SHA256_H6;
((uint32_t *)result)[7] = SHA256_H7;
+ /* prevent taking the spurious DNA fault with padlock. */
+ ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: "+S"(in), "+D"(result)
: "c"(count), "a"(0));
+ irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 681c15f42083..ee827a7f7c6a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -96,6 +96,9 @@ struct talitos_private {
unsigned int exec_units;
unsigned int desc_types;
+ /* SEC Compatibility info */
+ unsigned long features;
+
/* next channel to be assigned next incoming descriptor */
atomic_t last_chan;
@@ -133,6 +136,9 @@ struct talitos_private {
struct hwrng rng;
};
+/* .features flag */
+#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+
/*
* map virtual single (contiguous) pointer to h/w descriptor pointer
*/
@@ -785,7 +791,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
/* copy the generated ICV to dst */
if (edesc->dma_len) {
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 1];
+ edesc->dst_nents + 2];
sg = sg_last(areq->dst, edesc->dst_nents);
memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
icvdata, ctx->authsize);
@@ -814,7 +820,7 @@ static void ipsec_esp_decrypt_done(struct device *dev,
/* auth check */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 1];
+ edesc->dst_nents + 2];
else
icvdata = &edesc->link_tbl[0];
@@ -921,10 +927,30 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
&edesc->link_tbl[0]);
if (sg_count > 1) {
+ struct talitos_ptr *link_tbl_ptr =
+ &edesc->link_tbl[sg_count-1];
+ struct scatterlist *sg;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
+ /* If necessary for this SEC revision,
+ * add a link table entry for ICV.
+ */
+ if ((priv->features &
+ TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) &&
+ (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+ link_tbl_ptr->j_extent = 0;
+ link_tbl_ptr++;
+ link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+ link_tbl_ptr->len = cpu_to_be16(authsize);
+ sg = sg_last(areq->src, edesc->src_nents ? : 1);
+ link_tbl_ptr->ptr = cpu_to_be32(
+ (char *)sg_dma_address(sg)
+ + sg->length - authsize);
+ }
} else {
/* Only one segment now, so no link tbl needed */
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
@@ -944,12 +970,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
- &edesc->link_tbl[edesc->src_nents];
- struct scatterlist *sg;
+ &edesc->link_tbl[edesc->src_nents + 1];
desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
edesc->dma_link_tbl +
- edesc->src_nents);
+ edesc->src_nents + 1);
if (areq->src == areq->dst) {
memcpy(link_tbl_ptr, &edesc->link_tbl[0],
edesc->src_nents * sizeof(struct talitos_ptr));
@@ -957,14 +982,10 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
}
+ /* Add an entry to the link table for ICV data */
link_tbl_ptr += sg_count - 1;
-
- /* handle case where sg_last contains the ICV exclusively */
- sg = sg_last(areq->dst, edesc->dst_nents);
- if (sg->length == ctx->authsize)
- link_tbl_ptr--;
-
link_tbl_ptr->j_extent = 0;
+ sg_count++;
link_tbl_ptr++;
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
link_tbl_ptr->len = cpu_to_be16(authsize);
@@ -973,7 +994,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
edesc->dma_link_tbl +
edesc->src_nents +
- edesc->dst_nents + 1);
+ edesc->dst_nents + 2);
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1040,12 +1061,12 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
/*
* allocate space for base edesc plus the link tables,
- * allowing for a separate entry for the generated ICV (+ 1),
+ * allowing for two separate entries for ICV and generated ICV (+ 2),
* and the ICV data itself
*/
alloc_len = sizeof(struct ipsec_esp_edesc);
if (src_nents || dst_nents) {
- dma_len = (src_nents + dst_nents + 1) *
+ dma_len = (src_nents + dst_nents + 2) *
sizeof(struct talitos_ptr) + ctx->authsize;
alloc_len += dma_len;
} else {
@@ -1104,7 +1125,7 @@ static int aead_authenc_decrypt(struct aead_request *req)
/* stash incoming ICV for later cmp with ICV generated by the h/w */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 1];
+ edesc->dst_nents + 2];
else
icvdata = &edesc->link_tbl[0];
@@ -1480,6 +1501,9 @@ static int talitos_probe(struct of_device *ofdev,
goto err_out;
}
+ if (of_device_is_compatible(np, "fsl,sec3.0"))
+ priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
+
priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
GFP_KERNEL);
priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a4e4494663bf..0328da020a10 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,7 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
-#include <asm/plat-orion/mv_xor.h>
+#include <plat/mv_xor.h>
#include "mv_xor.h"
static void mv_xor_issue_pending(struct dma_chan *chan);
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 001622eb86f9..3bf8ee120d42 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -84,20 +84,23 @@ static struct kobj_type memmap_ktype = {
*/
/*
- * Firmware memory map entries
+ * Firmware memory map entries. No locking is needed because the
+ * firmware_map_add() and firmware_map_add_early() functions are called
+ * in firmware initialisation code in a single thread of execution.
*/
static LIST_HEAD(map_entries);
/**
- * Common implementation of firmware_map_add() and firmware_map_add_early()
- * which expects a pre-allocated struct firmware_map_entry.
- *
+ * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
* @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
* entry.
- */
+ *
+ * Common implementation of firmware_map_add() and firmware_map_add_early()
+ * which expects a pre-allocated struct firmware_map_entry.
+ **/
static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
const char *type,
struct firmware_map_entry *entry)
@@ -115,33 +118,52 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
return 0;
}
-/*
- * See <linux/firmware-map.h> for documentation.
- */
+/**
+ * firmware_map_add() - Adds a firmware mapping entry.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (inclusive).
+ * @type: Type of the memory range.
+ *
+ * This function uses kmalloc() for memory
+ * allocation. Use firmware_map_add_early() if you want to use the bootmem
+ * allocator.
+ *
+ * This function must be called before late_initcall.
+ *
+ * Returns 0 on success, or -ENOMEM if no memory could be allocated.
+ **/
int firmware_map_add(resource_size_t start, resource_size_t end,
const char *type)
{
struct firmware_map_entry *entry;
entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
- WARN_ON(!entry);
if (!entry)
return -ENOMEM;
return firmware_map_add_entry(start, end, type, entry);
}
-/*
- * See <linux/firmware-map.h> for documentation.
- */
+/**
+ * firmware_map_add_early() - Adds a firmware mapping entry.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (inclusive).
+ * @type: Type of the memory range.
+ *
+ * Adds a firmware mapping entry. This function uses the bootmem allocator
+ * for memory allocation. Use firmware_map_add() if you want to use kmalloc().
+ *
+ * This function must be called before late_initcall.
+ *
+ * Returns 0 on success, or -ENOMEM if no memory could be allocated.
+ **/
int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
const char *type)
{
struct firmware_map_entry *entry;
entry = alloc_bootmem_low(sizeof(struct firmware_map_entry));
- WARN_ON(!entry);
- if (!entry)
+ if (WARN_ON(!entry))
return -ENOMEM;
return firmware_map_add_entry(start, end, type, entry);
@@ -183,7 +205,10 @@ static ssize_t memmap_attr_show(struct kobject *kobj,
/*
* Initialises stuff and adds the entries in the map_entries list to
* sysfs. Important is that firmware_map_add() and firmware_map_add_early()
- * must be called before late_initcall.
+ * must be called before late_initcall. That is because memmap_init()
+ * itself runs as a late_initcall(), which means that if you call
+ * firmware_map_add() or firmware_map_add_early() afterwards, the entries
+ * are not added to sysfs.
*/
static int __init memmap_init(void)
{
@@ -192,13 +217,13 @@ static int __init memmap_init(void)
struct kset *memmap_kset;
memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
- WARN_ON(!memmap_kset);
- if (!memmap_kset)
+ if (WARN_ON(!memmap_kset))
return -ENOMEM;
list_for_each_entry(entry, &map_entries, list) {
entry->kobj.kset = memmap_kset;
- kobject_add(&entry->kobj, NULL, "%d", i++);
+ if (kobject_add(&entry->kobj, NULL, "%d", i++))
+ kobject_put(&entry->kobj);
}
return 0;
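With the kernel-doc above, the intended callers are early platform code that knows the firmware-provided memory layout. A hedged sketch of such a caller (the address range and type string are invented; real callers derive them from the firmware tables, e.g. the e820 map):

#include <linux/firmware-map.h>

	/* end address is inclusive, as documented above */
	if (firmware_map_add_early(0x100000, 0x7fffffff, "System RAM"))
		printk(KERN_WARNING "could not record firmware memmap entry\n");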
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 61e78a4369b9..b15f88249639 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -654,12 +654,12 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD | HID_QUIRK_IGNORE_MOUSE},
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE},
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE},
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD | HID_QUIRK_IGNORE_MOUSE },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bf4ebfb86fa5..d402e8d813ce 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -77,6 +77,22 @@ config SENSORS_AD7418
This driver can also be built as a module. If so, the module
will be called ad7418.
+config SENSORS_ADCXX
+ tristate "National Semiconductor ADCxxxSxxx"
+ depends on SPI_MASTER && EXPERIMENTAL
+ help
+ If you say yes here you get support for the National Semiconductor
+ ADC<bb><c>S<sss> chip family, where
+ * bb is the resolution in number of bits (8, 10, 12)
+ * c is the number of channels (1, 2, 4, 8)
+ * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500
+ kSPS and 101 for 1 MSPS)
+
+ Examples: ADC081S101, ADC124S501, ...
+
+ This driver can also be built as a module. If so, the module
+ will be called adcxx.
+
config SENSORS_ADM1021
tristate "Analog Devices ADM1021 and compatibles"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 7943e5cefb06..950134ab8426 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7414) += ad7414.o
obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
+obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o
obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index f00f497b9ca9..d568c65c1370 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1,5 +1,8 @@
/*
- abituguru3.c Copyright (c) 2006 Hans de Goede <j.w.r.degoede@hhs.nl>
+ abituguru3.c
+
+ Copyright (c) 2006-2008 Hans de Goede <j.w.r.degoede@hhs.nl>
+ Copyright (c) 2008 Alistair John Strachan <alistair@devzero.co.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -116,7 +119,7 @@ struct abituguru3_sensor_info {
struct abituguru3_motherboard_info {
u16 id;
- const char *name;
+ const char *dmi_name;
/* + 1 -> end of sensors indicated by a sensor with name == NULL */
struct abituguru3_sensor_info sensors[ABIT_UGURU3_MAX_NO_SENSORS + 1];
};
@@ -161,7 +164,7 @@ struct abituguru3_data {
/* Constants */
static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
- { 0x000C, "unknown", {
+ { 0x000C, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -183,7 +186,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX1 Fan", 35, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x000D, "Abit AW8", {
+ { 0x000D, NULL /* Abit AW8, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -212,7 +215,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX5 Fan", 39, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x000E, "AL-8", {
+ { 0x000E, NULL /* AL-8, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -233,7 +236,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "SYS Fan", 34, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x000F, "unknown", {
+ { 0x000F, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -254,7 +257,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "SYS Fan", 34, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0010, "Abit NI8 SLI GR", {
+ { 0x0010, NULL /* Abit NI8 SLI GR, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -276,7 +279,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "OTES1 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0011, "Abit AT8 32X", {
+ { 0x0011, NULL /* Abit AT8 32X, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 20, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -302,7 +305,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX2 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0012, "Abit AN8 32X", {
+ { 0x0012, NULL /* Abit AN8 32X, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 20, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -324,7 +327,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX1 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0013, "Abit AW8D", {
+ { 0x0013, NULL /* Abit AW8D, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -353,7 +356,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX5 Fan", 39, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0014, "Abit AB9 Pro", {
+ { 0x0014, NULL /* Abit AB9 Pro, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -374,7 +377,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "SYS Fan", 34, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0015, "unknown", {
+ { 0x0015, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 20, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -398,7 +401,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0016, "AW9D-MAX", {
+ { 0x0016, NULL /* AW9D-MAX, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -426,7 +429,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "OTES1 Fan", 38, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0017, "unknown", {
+ { 0x0017, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -451,7 +454,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 FAN", 37, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0018, "unknown", {
+ { 0x0018, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -478,7 +481,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0019, "unknown", {
+ { 0x0019, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 7, 0, 10, 1, 0 },
{ "DDR2", 13, 0, 20, 1, 0 },
{ "DDR2 VTT", 14, 0, 10, 1, 0 },
@@ -505,7 +508,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 FAN", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x001A, "Abit IP35 Pro", {
+ { 0x001A, "IP35 Pro(Intel P35-ICH9R)", {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -533,7 +536,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX4 Fan", 37, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x001B, "unknown", {
+ { 0x001B, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR3", 1, 0, 20, 1, 0 },
{ "DDR3 VTT", 2, 0, 10, 1, 0 },
@@ -560,7 +563,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x001C, "unknown", {
+ { 0x001C, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -935,9 +938,18 @@ static int __devinit abituguru3_probe(struct platform_device *pdev)
goto abituguru3_probe_error;
}
data->sensors = abituguru3_motherboards[i].sensors;
+
printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard "
- "ID: %04X (%s)\n", (unsigned int)id,
- abituguru3_motherboards[i].name);
+ "ID: %04X\n", (unsigned int)id);
+
+#ifdef CONFIG_DMI
+ if (!abituguru3_motherboards[i].dmi_name) {
+ printk(KERN_WARNING ABIT_UGURU3_NAME ": this motherboard was "
+ "not detected using DMI. Please send the output of "
+ "\"dmidecode\" to the abituguru3 maintainer"
+ "(see MAINTAINERS)\n");
+ }
+#endif
/* Fill the sysfs attr array */
sysfs_attr_i = 0;
@@ -1109,6 +1121,46 @@ static struct platform_driver abituguru3_driver = {
.resume = abituguru3_resume
};
+#ifdef CONFIG_DMI
+
+static int __init abituguru3_dmi_detect(void)
+{
+ const char *board_vendor, *board_name;
+ int i, err = (force) ? 1 : -ENODEV;
+
+ board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+ if (!board_vendor || strcmp(board_vendor, "http://www.abit.com.tw/"))
+ return err;
+
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (!board_name)
+ return err;
+
+ for (i = 0; abituguru3_motherboards[i].id; i++) {
+ const char *dmi_name = abituguru3_motherboards[i].dmi_name;
+ if (dmi_name && !strcmp(dmi_name, board_name))
+ break;
+ }
+
+ if (!abituguru3_motherboards[i].id)
+ return 1;
+
+ return 0;
+}
+
+#else /* !CONFIG_DMI */
+
+static inline int abituguru3_dmi_detect(void)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_DMI */
+
+/* FIXME: Manual detection should die eventually; we need to collect stable
+ * DMI model names first before we can rely entirely on CONFIG_DMI.
+ */
+
static int __init abituguru3_detect(void)
{
/* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or
@@ -1119,7 +1171,7 @@ static int __init abituguru3_detect(void)
if (((data_val == 0x00) || (data_val == 0x08)) &&
((cmd_val == 0xAC) || (cmd_val == 0x05) ||
(cmd_val == 0x55)))
- return ABIT_UGURU3_BASE;
+ return 0;
ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = "
"0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
@@ -1127,7 +1179,7 @@ static int __init abituguru3_detect(void)
if (force) {
printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is "
"present because of \"force\" parameter\n");
- return ABIT_UGURU3_BASE;
+ return 0;
}
/* No uGuru3 found */
@@ -1138,27 +1190,29 @@ static struct platform_device *abituguru3_pdev;
static int __init abituguru3_init(void)
{
- int address, err;
struct resource res = { .flags = IORESOURCE_IO };
-
-#ifdef CONFIG_DMI
- const char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
-
- /* safety check, refuse to load on non Abit motherboards */
- if (!force && (!board_vendor ||
- strcmp(board_vendor, "http://www.abit.com.tw/")))
- return -ENODEV;
-#endif
-
- address = abituguru3_detect();
- if (address < 0)
- return address;
+ int err;
+
+ /* Attempt DMI detection first */
+ err = abituguru3_dmi_detect();
+ if (err < 0)
+ return err;
+
+ /* Fall back to manual detection if there was no exact
+ * board name match, or force was specified.
+ */
+ if (err > 0) {
+ err = abituguru3_detect();
+ if (err)
+ return err;
+ }
err = platform_driver_register(&abituguru3_driver);
if (err)
goto exit;
- abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME, address);
+ abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME,
+ ABIT_UGURU3_BASE);
if (!abituguru3_pdev) {
printk(KERN_ERR ABIT_UGURU3_NAME
": Device allocation failed\n");
@@ -1166,8 +1220,8 @@ static int __init abituguru3_init(void)
goto exit_driver_unregister;
}
- res.start = address;
- res.end = address + ABIT_UGURU3_REGION_LENGTH - 1;
+ res.start = ABIT_UGURU3_BASE;
+ res.end = ABIT_UGURU3_BASE + ABIT_UGURU3_REGION_LENGTH - 1;
res.name = ABIT_UGURU3_NAME;
err = platform_device_add_resources(abituguru3_pdev, &res, 1);
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
new file mode 100644
index 000000000000..242294db3db6
--- /dev/null
+++ b/drivers/hwmon/adcxx.c
@@ -0,0 +1,329 @@
+/*
+ * adcxx.c
+ *
+ * The adcxx4s is an AD converter family from National Semiconductor (NS).
+ *
+ * Copyright (c) 2008 Marc Pignat <marc.pignat@hevs.ch>
+ *
+ * The adcxx4s communicates with a host processor via an SPI/Microwire Bus
+ * interface. This driver supports the whole family of devices with name
+ * ADC<bb><c>S<sss>, where
+ * * bb is the resolution in number of bits (8, 10, 12)
+ * * c is the number of channels (1, 2, 4, 8)
+ * * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500 kSPS
+ * and 101 for 1 MSPS)
+ *
+ * Complete datasheets are available at National's website here:
+ * http://www.national.com/ds/DC/ADC<bb><c>S<sss>.pdf
+ *
+ * Handling of the 8, 10 and 12 bit converters is the same; the
+ * unavailable bits are 0 :)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+
+#define DRVNAME "adcxx"
+
+struct adcxx {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ u32 channels;
+ u32 reference; /* in millivolts */
+};
+
+/* sysfs hook function */
+static ssize_t adcxx_read(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+ u8 tx_buf[2] = { attr->index << 3 }; /* other bits are don't care */
+ u8 rx_buf[2];
+ int status;
+ int value;
+
+ if (mutex_lock_interruptible(&adc->lock))
+ return -ERESTARTSYS;
+
+ status = spi_write_then_read(spi, tx_buf, sizeof(tx_buf),
+ rx_buf, sizeof(rx_buf));
+ if (status < 0) {
+ dev_warn(dev, "spi_write_then_read failed with status %d\n",
+ status);
+ goto out;
+ }
+
+ value = (rx_buf[0] << 8) + rx_buf[1];
+ dev_dbg(dev, "raw value = 0x%x\n", value);
+
+ value = value * adc->reference >> 12;
+ status = sprintf(buf, "%d\n", value);
+out:
+ mutex_unlock(&adc->lock);
+ return status;
+}
+
+static ssize_t adcxx_show_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ /* The minimum reference is 0 for this chip family */
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t adcxx_show_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+ u32 reference;
+
+ if (mutex_lock_interruptible(&adc->lock))
+ return -ERESTARTSYS;
+
+ reference = adc->reference;
+
+ mutex_unlock(&adc->lock);
+
+ return sprintf(buf, "%d\n", reference);
+}
+
+static ssize_t adcxx_set_max(struct device *dev,
+ struct device_attribute *devattr, const char *buf, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 10, &value))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&adc->lock))
+ return -ERESTARTSYS;
+
+ adc->reference = value;
+
+ mutex_unlock(&adc->lock);
+
+ return count;
+}
+
+static ssize_t adcxx_show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+
+ return sprintf(buf, "adcxx%ds\n", adc->channels);
+}
+
+static struct sensor_device_attribute ad_input[] = {
+ SENSOR_ATTR(name, S_IRUGO, adcxx_show_name, NULL, 0),
+ SENSOR_ATTR(in_min, S_IRUGO, adcxx_show_min, NULL, 0),
+ SENSOR_ATTR(in_max, S_IWUSR | S_IRUGO, adcxx_show_max,
+ adcxx_set_max, 0),
+ SENSOR_ATTR(in0_input, S_IRUGO, adcxx_read, NULL, 0),
+ SENSOR_ATTR(in1_input, S_IRUGO, adcxx_read, NULL, 1),
+ SENSOR_ATTR(in2_input, S_IRUGO, adcxx_read, NULL, 2),
+ SENSOR_ATTR(in3_input, S_IRUGO, adcxx_read, NULL, 3),
+ SENSOR_ATTR(in4_input, S_IRUGO, adcxx_read, NULL, 4),
+ SENSOR_ATTR(in5_input, S_IRUGO, adcxx_read, NULL, 5),
+ SENSOR_ATTR(in6_input, S_IRUGO, adcxx_read, NULL, 6),
+ SENSOR_ATTR(in7_input, S_IRUGO, adcxx_read, NULL, 7),
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __devinit adcxx_probe(struct spi_device *spi, int channels)
+{
+ struct adcxx *adc;
+ int status;
+ int i;
+
+ adc = kzalloc(sizeof *adc, GFP_KERNEL);
+ if (!adc)
+ return -ENOMEM;
+
+ /* set a default value for the reference */
+ adc->reference = 3300;
+ adc->channels = channels;
+ mutex_init(&adc->lock);
+
+ mutex_lock(&adc->lock);
+
+ dev_set_drvdata(&spi->dev, adc);
+
+ for (i = 0; i < 3 + adc->channels; i++) {
+ status = device_create_file(&spi->dev, &ad_input[i].dev_attr);
+ if (status) {
+ dev_err(&spi->dev, "device_create_file failed.\n");
+ goto out_err;
+ }
+ }
+
+ adc->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(adc->hwmon_dev)) {
+ dev_err(&spi->dev, "hwmon_device_register failed.\n");
+ status = PTR_ERR(adc->hwmon_dev);
+ goto out_err;
+ }
+
+ mutex_unlock(&adc->lock);
+ return 0;
+
+out_err:
+ for (i--; i >= 0; i--)
+ device_remove_file(&spi->dev, &ad_input[i].dev_attr);
+
+ dev_set_drvdata(&spi->dev, NULL);
+ mutex_unlock(&adc->lock);
+ kfree(adc);
+ return status;
+}
+
+static int __devinit adcxx1s_probe(struct spi_device *spi)
+{
+ return adcxx_probe(spi, 1);
+}
+
+static int __devinit adcxx2s_probe(struct spi_device *spi)
+{
+ return adcxx_probe(spi, 2);
+}
+
+static int __devinit adcxx4s_probe(struct spi_device *spi)
+{
+ return adcxx_probe(spi, 4);
+}
+
+static int __devinit adcxx8s_probe(struct spi_device *spi)
+{
+ return adcxx_probe(spi, 8);
+}
+
+static int __devexit adcxx_remove(struct spi_device *spi)
+{
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+ int i;
+
+ mutex_lock(&adc->lock);
+ hwmon_device_unregister(adc->hwmon_dev);
+ for (i = 0; i < 3 + adc->channels; i++)
+ device_remove_file(&spi->dev, &ad_input[i].dev_attr);
+
+ dev_set_drvdata(&spi->dev, NULL);
+ mutex_unlock(&adc->lock);
+ kfree(adc);
+
+ return 0;
+}
+
+static struct spi_driver adcxx1s_driver = {
+ .driver = {
+ .name = "adcxx1s",
+ .owner = THIS_MODULE,
+ },
+ .probe = adcxx1s_probe,
+ .remove = __devexit_p(adcxx_remove),
+};
+
+static struct spi_driver adcxx2s_driver = {
+ .driver = {
+ .name = "adcxx2s",
+ .owner = THIS_MODULE,
+ },
+ .probe = adcxx2s_probe,
+ .remove = __devexit_p(adcxx_remove),
+};
+
+static struct spi_driver adcxx4s_driver = {
+ .driver = {
+ .name = "adcxx4s",
+ .owner = THIS_MODULE,
+ },
+ .probe = adcxx4s_probe,
+ .remove = __devexit_p(adcxx_remove),
+};
+
+static struct spi_driver adcxx8s_driver = {
+ .driver = {
+ .name = "adcxx8s",
+ .owner = THIS_MODULE,
+ },
+ .probe = adcxx8s_probe,
+ .remove = __devexit_p(adcxx_remove),
+};
+
+static int __init init_adcxx(void)
+{
+ int status;
+ status = spi_register_driver(&adcxx1s_driver);
+ if (status)
+ goto reg_1_failed;
+
+ status = spi_register_driver(&adcxx2s_driver);
+ if (status)
+ goto reg_2_failed;
+
+ status = spi_register_driver(&adcxx4s_driver);
+ if (status)
+ goto reg_4_failed;
+
+ status = spi_register_driver(&adcxx8s_driver);
+ if (status)
+ goto reg_8_failed;
+
+ return status;
+
+reg_8_failed:
+ spi_unregister_driver(&adcxx4s_driver);
+reg_4_failed:
+ spi_unregister_driver(&adcxx2s_driver);
+reg_2_failed:
+ spi_unregister_driver(&adcxx1s_driver);
+reg_1_failed:
+ return status;
+}
+
+static void __exit exit_adcxx(void)
+{
+ spi_unregister_driver(&adcxx1s_driver);
+ spi_unregister_driver(&adcxx2s_driver);
+ spi_unregister_driver(&adcxx4s_driver);
+ spi_unregister_driver(&adcxx8s_driver);
+}
+
+module_init(init_adcxx);
+module_exit(exit_adcxx);
+
+MODULE_AUTHOR("Marc Pignat");
+MODULE_DESCRIPTION("National Semiconductor adcxx8sxxx Linux driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("adcxx1s");
+MODULE_ALIAS("adcxx2s");
+MODULE_ALIAS("adcxx4s");
+MODULE_ALIAS("adcxx8s");
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index aacc0c4b809c..b06b8e090a27 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -98,6 +98,12 @@ static const char* temperature_sensors_sets[][36] = {
"TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S",
"TM1P", "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P",
"TM9S", "TN0H", "TS0C", NULL },
+/* Set 5: iMac */
+ { "TC0D", "TA0P", "TG0P", "TG0D", "TG0H", "TH0P", "Tm0P", "TO0P",
+ "Tp0C", NULL },
+/* Set 6: Macbook3 set */
+ { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TTF0", "TW0P", "Th0H",
+ "Th0S", "Th1H", NULL },
};
/* List of keys used to read/write fan speeds */
@@ -1223,6 +1229,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 0, .light = 0, .temperature_set = 3 },
/* MacPro: temperature set 4 */
{ .accelerometer = 0, .light = 0, .temperature_set = 4 },
+/* iMac: temperature set 5 */
+ { .accelerometer = 0, .light = 0, .temperature_set = 5 },
+/* MacBook3: accelerometer and temperature set 6 */
+ { .accelerometer = 1, .light = 0, .temperature_set = 6 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1232,10 +1242,14 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
(void*)&applesmc_dmi_data[0]},
- { applesmc_dmi_match, "Apple MacBook", {
+ { applesmc_dmi_match, "Apple MacBook (v2)", {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") },
(void*)&applesmc_dmi_data[1]},
+ { applesmc_dmi_match, "Apple MacBook (v3)", {
+ DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") },
+ (void*)&applesmc_dmi_data[6]},
{ applesmc_dmi_match, "Apple MacBook", {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
@@ -1248,6 +1262,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
(void*)&applesmc_dmi_data[4]},
+ { applesmc_dmi_match, "Apple iMac", {
+ DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME,"iMac") },
+ (void*)&applesmc_dmi_data[5]},
{ .ident = NULL }
};
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 70239acecc8e..93c17223b527 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -413,10 +413,11 @@ static int __init coretemp_init(void)
for_each_online_cpu(i) {
struct cpuinfo_x86 *c = &cpu_data(i);
- /* check if family 6, models 0xe, 0xf, 0x16, 0x17 */
+ /* check if family 6, models 0xe, 0xf, 0x16, 0x17, 0x1A */
if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
!((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
- (c->x86_model == 0x16) || (c->x86_model == 0x17))) {
+ (c->x86_model == 0x16) || (c->x86_model == 0x17) ||
+ (c->x86_model == 0x1A))) {
/* supported CPU not found, but report the unknown
family 6 CPU */
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 7b0a32c4dcfb..c54eff92be4a 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -37,13 +37,21 @@
* For VRD 10.0 and up, "VRD x.y Design Guide",
* available at http://developer.intel.com/.
*
- * AMD NPT 0Fh (Athlon64 & Opteron), AMD Publication 32559,
+ * AMD Athlon 64 and AMD Opteron Processors, AMD Publication 26094,
+ * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/26094.PDF
+ * Table 74. VID Code Voltages
+ * This corresponds to an arbitrary VRM code of 24 in the functions below.
+ * These CPU models (K8 revision <= E) have 5 VID pins. See also:
+ * Revision Guide for AMD Athlon 64 and AMD Opteron Processors, AMD Publication 25759,
+ * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/25759.pdf
+ *
+ * AMD NPT Family 0Fh Processors, AMD Publication 32559,
* http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf
* Table 71. VID Code Voltages
- * AMD Opteron processors don't follow the Intel specifications.
- * I'm going to "make up" 2.4 as the spec number for the Opterons.
- * No good reason just a mnemonic for the 24x Opteron processor
- * series.
+ * This corresponds to an arbitrary VRM code of 25 in the functions below.
+ * These CPU models (K8 revision >= F) have 6 VID pins. See also:
+ * Revision Guide for AMD NPT Family 0Fh Processors, AMD Publication 33610,
+ * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
*
* The 17 specification is in fact Intel Mobile Voltage Positioning -
* (IMVP-II). You can find more information in the datasheet of Max1718
@@ -95,7 +103,12 @@ int vid_from_reg(int val, u8 vrm)
return 0;
return((1600000 - (val - 2) * 6250 + 500) / 1000);
- case 24: /* AMD NPT 0Fh (Athlon64 & Opteron) */
+ case 24: /* Athlon64 & Opteron */
+ val &= 0x1f;
+ if (val == 0x1f)
+ return 0;
+ /* fall through */
+ case 25: /* AMD NPT 0Fh */
val &= 0x3f;
return (val < 32) ? 1550 - 25 * val
: 775 - (25 * (val - 31)) / 2;
@@ -157,11 +170,16 @@ struct vrm_model {
#ifdef CONFIG_X86
-/* the stepping parameter is highest acceptable stepping for current line */
+/*
+ * The stepping parameter is the highest acceptable stepping for the current line.
+ * The model match must be exact for 4-bit values. For model values 0x10
+ * and above (extended model), all models below the parameter will match.
+ */
static struct vrm_model vrm_models[] = {
{X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
- {X86_VENDOR_AMD, 0xF, ANY, ANY, 24}, /* Athlon 64, Opteron and above VRM 24 */
+ {X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */
+ {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */
{X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */
{X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
{X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */
@@ -189,6 +207,8 @@ static u8 find_vrm(u8 eff_family, u8 eff_model, u8 eff_stepping, u8 vendor)
if (vrm_models[i].vendor==vendor)
if ((vrm_models[i].eff_family==eff_family)
&& ((vrm_models[i].eff_model==eff_model) ||
+ (vrm_models[i].eff_model >= 0x10 &&
+ eff_model <= vrm_models[i].eff_model) ||
(vrm_models[i].eff_model==ANY)) &&
(eff_stepping <= vrm_models[i].eff_stepping))
return vrm_models[i].vrm_type;
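
The hunk above splits the AMD decoding into two arbitrary VRM codes: 24 for K8 parts (revision <= E) with 5 VID pins, where the all-ones code 0x1f means the output is off, and 25 for NPT family 0Fh parts with 6 VID pins. The following is a minimal standalone sketch of that decoding outside the driver; the function name and example codes are ours, only the masks and the voltage table mirror the hunk.

    #include <stdio.h>

    /* Standalone sketch of the VID decoding above; illustration only. */
    static int vid_to_mv(int val, int vrm)
    {
            if (vrm == 24) {                /* 5 VID pins: K8 revision <= E */
                    val &= 0x1f;
                    if (val == 0x1f)        /* all ones: output off */
                            return 0;
            } else {                        /* vrm 25, 6 VID pins: NPT 0Fh */
                    val &= 0x3f;
            }
            /* 1550 mV, stepping down 25 mV per code, half steps above 31 */
            return val < 32 ? 1550 - 25 * val
                            : 775 - (25 * (val - 31)) / 2;
    }

    int main(void)
    {
            printf("VRM 24, code 0x02 -> %d mV\n", vid_to_mv(0x02, 24)); /* 1500 */
            printf("VRM 25, code 0x22 -> %d mV\n", vid_to_mv(0x22, 25)); /* 738 */
            printf("VRM 24, code 0x1f -> %d mV (off)\n", vid_to_mv(0x1f, 24));
            return 0;
    }
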
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index f9e2ed621f7b..2ede9388096b 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -81,6 +81,8 @@ static unsigned long amb_reg_temp(unsigned int amb)
#define MAX_AMBS_PER_CHANNEL 16
#define MAX_AMBS (MAX_MEM_CHANNELS * \
MAX_AMBS_PER_CHANNEL)
+#define CHANNEL_SHIFT 4
+#define DIMM_MASK 0xF
/*
* Ugly hack: For some reason the highest bit is set if there
* are _any_ DIMMs in the channel. Attempting to read from
@@ -89,7 +91,7 @@ static unsigned long amb_reg_temp(unsigned int amb)
* might prevent us from seeing the 16th DIMM in the channel.
*/
#define REAL_MAX_AMBS_PER_CHANNEL 15
-#define KNOBS_PER_AMB 5
+#define KNOBS_PER_AMB 6
static unsigned long amb_num_from_reg(unsigned int byte_num, unsigned int bit)
{
@@ -238,6 +240,16 @@ static ssize_t show_amb_temp(struct device *dev,
500 * amb_read_byte(data, amb_reg_temp(attr->index)));
}
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return sprintf(buf, "Ch. %d DIMM %d\n", attr->index >> CHANNEL_SHIFT,
+ attr->index & DIMM_MASK);
+}
+
static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
{
int i, j, k, d = 0;
@@ -268,6 +280,20 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
continue;
d++;
+ /* sysfs label */
+ iattr = data->attrs + data->num_attrs;
+ snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
+ "temp%d_label", d);
+ iattr->s_attr.dev_attr.attr.name = iattr->name;
+ iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
+ iattr->s_attr.dev_attr.show = show_label;
+ iattr->s_attr.index = k;
+ res = device_create_file(&pdev->dev,
+ &iattr->s_attr.dev_attr);
+ if (res)
+ goto exit_remove;
+ data->num_attrs++;
+
/* Temperature sysfs knob */
iattr = data->attrs + data->num_attrs;
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
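
The new temp%d_label attribute reports "Ch. <channel> DIMM <dimm>" by treating the sensor index as a packed value: channel in the bits above CHANNEL_SHIFT, DIMM number in the low DIMM_MASK nibble. Below is a small sketch of how such an index can be packed and then unpacked the way show_label() does; the helper name and sample values are ours.

    #include <stdio.h>

    #define CHANNEL_SHIFT   4
    #define DIMM_MASK       0xF

    /* Pack a (channel, dimm) pair into one attribute index. */
    static unsigned int amb_index(unsigned int channel, unsigned int dimm)
    {
            return (channel << CHANNEL_SHIFT) | (dimm & DIMM_MASK);
    }

    int main(void)
    {
            unsigned int idx = amb_index(2, 7);

            /* Unpacking mirrors show_label() above: prints "Ch. 2 DIMM 7". */
            printf("Ch. %u DIMM %u\n", idx >> CHANNEL_SHIFT, idx & DIMM_MASK);
            return 0;
    }
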
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index c9416e657487..0f70dc204105 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -1,6 +1,6 @@
/*
- * A hwmon driver for the IBM Active Energy Manager temperature/power sensors
- * and capping functionality.
+ * A hwmon driver for the IBM System Director Active Energy Manager (AEM)
+ * temperature/power/energy sensors and capping functionality.
* Copyright (C) 2008 IBM
*
* Author: Darrick J. Wong <djwong@us.ibm.com>
@@ -463,12 +463,18 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
}
/* Update AEM energy registers */
+static void update_aem_energy_one(struct aem_data *data, int which)
+{
+ aem_read_sensor(data, AEM_ENERGY_ELEMENT, which,
+ &data->energy[which], 8);
+}
+
static void update_aem_energy(struct aem_data *data)
{
- aem_read_sensor(data, AEM_ENERGY_ELEMENT, 0, &data->energy[0], 8);
+ update_aem_energy_one(data, 0);
if (data->ver_major < 2)
return;
- aem_read_sensor(data, AEM_ENERGY_ELEMENT, 1, &data->energy[1], 8);
+ update_aem_energy_one(data, 1);
}
/* Update all AEM1 sensors */
@@ -676,7 +682,8 @@ static int aem_find_aem2(struct aem_ipmi_data *data,
return -ETIMEDOUT;
if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) ||
- memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id)))
+ memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id)) ||
+ fi_resp->num_instances <= instance_num)
return -ENOENT;
return 0;
@@ -849,7 +856,7 @@ static ssize_t aem_show_power(struct device *dev,
struct timespec b, a;
mutex_lock(&data->lock);
- update_aem_energy(data);
+ update_aem_energy_one(data, attr->index);
getnstimeofday(&b);
before = data->energy[attr->index];
@@ -861,7 +868,7 @@ static ssize_t aem_show_power(struct device *dev,
return 0;
}
- update_aem_energy(data);
+ update_aem_energy_one(data, attr->index);
getnstimeofday(&a);
after = data->energy[attr->index];
mutex_unlock(&data->lock);
@@ -880,7 +887,9 @@ static ssize_t aem_show_energy(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct aem_data *a = dev_get_drvdata(dev);
- a->update(a);
+ mutex_lock(&a->lock);
+ update_aem_energy_one(a, attr->index);
+ mutex_unlock(&a->lock);
return sprintf(buf, "%llu\n",
(unsigned long long)a->energy[attr->index] * 1000);
@@ -1104,7 +1113,7 @@ static void __exit aem_exit(void)
}
MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
-MODULE_DESCRIPTION("IBM Active Energy Manager power/temp sensor driver");
+MODULE_DESCRIPTION("IBM AEM power/temp/energy sensor driver");
MODULE_LICENSE("GPL");
module_init(aem_init);
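
aem_show_power() above now samples only the energy counter it reports, once before and once after a delay, and derives power from the delta divided by the elapsed time. The sketch below restates that calculation on its own; the unit choices (microjoules, microseconds, milliwatts) and the helper name are our assumptions for the example, not taken from the driver.

    #include <stdio.h>
    #include <stdint.h>

    /* Power from two samples of a monotonically increasing energy counter.
     * Assumed units for this sketch: counter in microjoules, elapsed time in
     * microseconds, result in milliwatts. */
    static uint64_t power_mw(uint64_t energy_before_uj, uint64_t energy_after_uj,
                             uint64_t elapsed_us)
    {
            if (elapsed_us == 0)
                    return 0;
            /* uJ per us equals W; scale by 1000 to report milliwatts. */
            return (energy_after_uj - energy_before_uj) * 1000 / elapsed_us;
    }

    int main(void)
    {
            /* 5,000,000 uJ consumed over 100,000 us -> 50 W -> 50000 mW. */
            printf("%llu mW\n",
                   (unsigned long long)power_mw(1000000, 6000000, 100000));
            return 0;
    }
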
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index daa7d121483b..de21142d106c 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1055,9 +1055,10 @@ static int w83791d_probe(struct i2c_client *client,
{
struct w83791d_data *data;
struct device *dev = &client->dev;
- int i, val1, err;
+ int i, err;
#ifdef DEBUG
+ int val1;
val1 = w83791d_read(client, W83791D_REG_DID_VID4);
dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n",
(val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1);
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 18355ae2155d..4655b794ebe3 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -1593,7 +1593,7 @@ fail1:
if (machine_is_omap_h2()) {
/* full speed signaling by default */
isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1,
- MC1_SPEED_REG);
+ MC1_SPEED);
isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2,
MC2_SPD_SUSP_CTRL);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0b0618edd645..1ab919f836a8 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -156,6 +156,14 @@ struct ehca_mod_qp_parm {
#define EHCA_MOD_QP_PARM_MAX 4
+#define QMAP_IDX_MASK 0xFFFFULL
+
+/* struct for tracking if cqes have been reported to the application */
+struct ehca_qmap_entry {
+ u16 app_wr_id;
+ u16 reported;
+};
+
struct ehca_qp {
union {
struct ib_qp ib_qp;
@@ -165,6 +173,7 @@ struct ehca_qp {
enum ehca_ext_qp_type ext_type;
enum ib_qp_state state;
struct ipz_queue ipz_squeue;
+ struct ehca_qmap_entry *sq_map;
struct ipz_queue ipz_rqueue;
struct h_galpas galpas;
u32 qkey;
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
index 818803057ebf..5d28e3e98a20 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -213,6 +213,7 @@ struct ehca_wqe {
#define WC_STATUS_ERROR_BIT 0x80000000
#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
#define WC_STATUS_PURGE_BIT 0x10
+#define WC_SEND_RECEIVE_BIT 0x80
struct ehca_cqe {
u64 work_request_id;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index ea13efddf175..b6bcee036734 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -412,6 +412,7 @@ static struct ehca_qp *internal_create_qp(
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
struct ib_ucontext *context = NULL;
+ u32 nr_qes;
u64 h_ret;
int is_llqp = 0, has_srq = 0;
int qp_type, max_send_sge, max_recv_sge, ret;
@@ -715,6 +716,15 @@ static struct ehca_qp *internal_create_qp(
"and pages ret=%i", ret);
goto create_qp_exit2;
}
+ nr_qes = my_qp->ipz_squeue.queue_length /
+ my_qp->ipz_squeue.qe_size;
+ my_qp->sq_map = vmalloc(nr_qes *
+ sizeof(struct ehca_qmap_entry));
+ if (!my_qp->sq_map) {
+ ehca_err(pd->device, "Couldn't allocate squeue "
+ "map ret=%i", ret);
+ goto create_qp_exit3;
+ }
}
if (HAS_RQ(my_qp)) {
@@ -724,7 +734,7 @@ static struct ehca_qp *internal_create_qp(
if (ret) {
ehca_err(pd->device, "Couldn't initialize rqueue "
"and pages ret=%i", ret);
- goto create_qp_exit3;
+ goto create_qp_exit4;
}
}
@@ -770,7 +780,7 @@ static struct ehca_qp *internal_create_qp(
if (!my_qp->mod_qp_parm) {
ehca_err(pd->device,
"Could not alloc mod_qp_parm");
- goto create_qp_exit4;
+ goto create_qp_exit5;
}
}
}
@@ -780,7 +790,7 @@ static struct ehca_qp *internal_create_qp(
h_ret = ehca_define_sqp(shca, my_qp, init_attr);
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
- goto create_qp_exit5;
+ goto create_qp_exit6;
}
}
@@ -789,7 +799,7 @@ static struct ehca_qp *internal_create_qp(
if (ret) {
ehca_err(pd->device,
"Couldn't assign qp to send_cq ret=%i", ret);
- goto create_qp_exit5;
+ goto create_qp_exit6;
}
}
@@ -815,22 +825,26 @@ static struct ehca_qp *internal_create_qp(
if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
ehca_err(pd->device, "Copy to udata failed");
ret = -EINVAL;
- goto create_qp_exit6;
+ goto create_qp_exit7;
}
}
return my_qp;
-create_qp_exit6:
+create_qp_exit7:
ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
-create_qp_exit5:
+create_qp_exit6:
kfree(my_qp->mod_qp_parm);
-create_qp_exit4:
+create_qp_exit5:
if (HAS_RQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
+create_qp_exit4:
+ if (HAS_SQ(my_qp))
+ vfree(my_qp->sq_map);
+
create_qp_exit3:
if (HAS_SQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
@@ -1534,8 +1548,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_QKEY)
my_qp->qkey = attr->qkey;
- my_qp->state = qp_new_state;
-
modify_qp_exit2:
if (squeue_locked) { /* this means: sqe -> rts */
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1551,6 +1563,8 @@ modify_qp_exit1:
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
+ int ret = 0;
+
struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
ib_device);
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
@@ -1597,12 +1611,18 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
attr->qp_state, my_qp->init_attr.port_num,
ibqp->qp_type);
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
- return 0;
+ goto out;
}
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
}
- return internal_modify_qp(ibqp, attr, attr_mask, 0);
+ ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
+
+out:
+ if ((ret == 0) && (attr_mask & IB_QP_STATE))
+ my_qp->state = attr->qp_state;
+
+ return ret;
}
void ehca_recover_sqp(struct ib_qp *sqp)
@@ -1973,8 +1993,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
if (HAS_RQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
- if (HAS_SQ(my_qp))
+ if (HAS_SQ(my_qp)) {
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
+ vfree(my_qp->sq_map);
+ }
kmem_cache_free(qp_cache, my_qp);
atomic_dec(&shca->num_qps);
return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 898c8b5c38dd..4426d82fe798 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -139,6 +139,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
static inline int ehca_write_swqe(struct ehca_qp *qp,
struct ehca_wqe *wqe_p,
const struct ib_send_wr *send_wr,
+ u32 sq_map_idx,
int hidden)
{
u32 idx;
@@ -157,7 +158,11 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
/* clear wqe header until sglist */
memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
- wqe_p->work_request_id = send_wr->wr_id;
+ wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK;
+ wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK;
+
+ qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK;
+ qp->sq_map[sq_map_idx].reported = 0;
switch (send_wr->opcode) {
case IB_WR_SEND:
@@ -381,6 +386,7 @@ static inline int post_one_send(struct ehca_qp *my_qp,
{
struct ehca_wqe *wqe_p;
int ret;
+ u32 sq_map_idx;
u64 start_offset = my_qp->ipz_squeue.current_q_offset;
/* get pointer next to free WQE */
@@ -393,8 +399,15 @@ static inline int post_one_send(struct ehca_qp *my_qp,
"qp_num=%x", my_qp->ib_qp.qp_num);
return -ENOMEM;
}
+
+ /*
+ * Get the index of the WQE in the send queue. The same index is used
+ * for writing into the sq_map.
+ */
+ sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
+
/* write a SEND WQE into the QUEUE */
- ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
+ ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
/*
* if something failed,
* reset the free entry pointer to the start value
@@ -589,7 +602,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
struct ehca_qp *my_qp;
int cqe_count = 0, is_error;
-poll_cq_one_read_cqe:
+repoll:
cqe = (struct ehca_cqe *)
ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
if (!cqe) {
@@ -617,7 +630,7 @@ poll_cq_one_read_cqe:
ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
my_cq->cq_number, cqe->local_qp_number);
/* ignore this purged cqe */
- goto poll_cq_one_read_cqe;
+ goto repoll;
}
spin_lock_irqsave(&qp->spinlock_s, flags);
purgeflag = qp->sqerr_purgeflag;
@@ -636,7 +649,7 @@ poll_cq_one_read_cqe:
* that caused sqe and turn off purge flag
*/
qp->sqerr_purgeflag = 0;
- goto poll_cq_one_read_cqe;
+ goto repoll;
}
}
@@ -654,8 +667,34 @@ poll_cq_one_read_cqe:
my_cq, my_cq->cq_number);
}
- /* we got a completion! */
- wc->wr_id = cqe->work_request_id;
+ read_lock(&ehca_qp_idr_lock);
+ my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
+ read_unlock(&ehca_qp_idr_lock);
+ if (!my_qp)
+ goto repoll;
+ wc->qp = &my_qp->ib_qp;
+
+ if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) {
+ struct ehca_qmap_entry *qmap_entry;
+ /*
+ * We got a send completion and need to restore the original
+ * wr_id.
+ */
+ qmap_entry = &my_qp->sq_map[cqe->work_request_id &
+ QMAP_IDX_MASK];
+
+ if (qmap_entry->reported) {
+ ehca_warn(cq->device, "Double cqe on qp_num=%#x",
+ my_qp->real_qp_num);
+ /* found a double cqe, discard it and read next one */
+ goto repoll;
+ }
+ wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK;
+ wc->wr_id |= qmap_entry->app_wr_id;
+ qmap_entry->reported = 1;
+ } else
+ /* We got a receive completion. */
+ wc->wr_id = cqe->work_request_id;
/* eval ib_wc_opcode */
wc->opcode = ib_wc_opcode[cqe->optype]-1;
@@ -667,7 +706,7 @@ poll_cq_one_read_cqe:
ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
my_cq, my_cq->cq_number);
/* update also queue adder to throw away this entry!!! */
- goto poll_cq_one_exit0;
+ goto repoll;
}
/* eval ib_wc_status */
@@ -678,11 +717,6 @@ poll_cq_one_read_cqe:
} else
wc->status = IB_WC_SUCCESS;
- read_lock(&ehca_qp_idr_lock);
- my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
- wc->qp = &my_qp->ib_qp;
- read_unlock(&ehca_qp_idr_lock);
-
wc->byte_len = cqe->nr_bytes_transferred;
wc->pkey_index = cqe->pkey_index;
wc->slid = cqe->rlid;
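
The ehca changes above implement a small encoding scheme: when a send WQE is posted, the low 16 bits of the hardware wr_id are replaced with the send-queue index (QMAP_IDX_MASK) and the application's low bits are parked in sq_map[]; at completion time the index is read back out of the CQE and the original wr_id is reassembled, with the reported flag catching duplicate CQEs. A standalone sketch of that round trip; the struct and function names are ours, only the 16-bit mask mirrors the patch.

    #include <stdio.h>
    #include <stdint.h>

    #define QMAP_IDX_MASK 0xFFFFULL

    struct qmap_entry {
            uint16_t app_wr_id;     /* application's low 16 bits, saved aside */
            uint16_t reported;      /* completion already delivered? */
    };

    static struct qmap_entry sq_map[64];

    /* On post: replace the low 16 bits of wr_id with the queue index. */
    static uint64_t encode_wr_id(uint64_t app_wr_id, uint32_t sq_idx)
    {
            sq_map[sq_idx].app_wr_id = app_wr_id & QMAP_IDX_MASK;
            sq_map[sq_idx].reported = 0;
            return (app_wr_id & ~QMAP_IDX_MASK) | (sq_idx & QMAP_IDX_MASK);
    }

    /* On completion: restore the original wr_id from the saved low bits. */
    static uint64_t decode_wr_id(uint64_t hw_wr_id)
    {
            uint32_t sq_idx = hw_wr_id & QMAP_IDX_MASK;

            sq_map[sq_idx].reported = 1;
            return (hw_wr_id & ~QMAP_IDX_MASK) | sq_map[sq_idx].app_wr_id;
    }

    int main(void)
    {
            uint64_t app = 0xABCD00000000BEEFULL;
            uint64_t hw = encode_wr_id(app, 7);     /* low 16 bits become 7 */
            uint64_t back = decode_wr_id(hw);

            printf("posted %#llx, completed %#llx, match=%d\n",
                   (unsigned long long)hw, (unsigned long long)back, back == app);
            return 0;
    }
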
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7ebc400a4b3d..341ffedafed6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -202,7 +202,7 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
dev_kfree_skb_any(rx_ring[i].skb);
}
- kfree(rx_ring);
+ vfree(rx_ring);
}
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
@@ -352,9 +352,14 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
int ret;
int i;
- rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
- if (!rx->rx_ring)
+ rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+ if (!rx->rx_ring) {
+ printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
+ priv->ca->name, ipoib_recvq_size);
return -ENOMEM;
+ }
+
+ memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
t = kmalloc(sizeof *t, GFP_KERNEL);
if (!t) {
@@ -1494,14 +1499,16 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
return;
}
- priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
- GFP_KERNEL);
+ priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
+ return;
}
+
+ memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}
int ipoib_cm_dev_init(struct net_device *dev)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 2d65411f6763..a92d81567559 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -647,6 +647,47 @@ static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
return copy_to_user(p, str, len) ? -EFAULT : len;
}
+#define OLD_KEY_MAX 0x1ff
+static int handle_eviocgbit(struct input_dev *dev, unsigned int cmd, void __user *p, int compat_mode)
+{
+ static unsigned long keymax_warn_time;
+ unsigned long *bits;
+ int len;
+
+ switch (_IOC_NR(cmd) & EV_MAX) {
+
+ case 0: bits = dev->evbit; len = EV_MAX; break;
+ case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
+ case EV_REL: bits = dev->relbit; len = REL_MAX; break;
+ case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
+ case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
+ case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
+ case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
+ case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
+ case EV_SW: bits = dev->swbit; len = SW_MAX; break;
+ default: return -EINVAL;
+ }
+
+ /*
+ * Work around bugs in userspace programs that like to do
+ * EVIOCGBIT(EV_KEY, KEY_MAX) and not realize that 'len'
+ * should be in bytes, not in bits.
+ */
+ if ((_IOC_NR(cmd) & EV_MAX) == EV_KEY && _IOC_SIZE(cmd) == OLD_KEY_MAX) {
+ len = OLD_KEY_MAX;
+ if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000))
+ printk(KERN_WARNING
+ "evdev.c(EVIOCGBIT): Suspicious buffer size %d, "
+ "limiting output to %d bytes. See "
+ "http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n",
+ OLD_KEY_MAX,
+ BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
+ }
+
+ return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
+}
+#undef OLD_KEY_MAX
+
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p, int compat_mode)
{
@@ -733,26 +774,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
if (_IOC_DIR(cmd) == _IOC_READ) {
- if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0))) {
-
- unsigned long *bits;
- int len;
-
- switch (_IOC_NR(cmd) & EV_MAX) {
-
- case 0: bits = dev->evbit; len = EV_MAX; break;
- case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
- case EV_REL: bits = dev->relbit; len = REL_MAX; break;
- case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
- case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
- case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
- case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
- case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
- case EV_SW: bits = dev->swbit; len = SW_MAX; break;
- default: return -EINVAL;
- }
- return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
- }
+ if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
+ return handle_eviocgbit(dev, cmd, p, compat_mode);
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0)))
return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd),
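
handle_eviocgbit() above exists because some programs call EVIOCGBIT(EV_KEY, KEY_MAX) with the old KEY_MAX value (0x1ff) as the ioctl size, i.e. a bit count where a byte count is expected; copying the full key bitmap would then overrun the 64-byte buffer those programs actually allocate. Below is a small sketch of the resulting clamp, using the same constants but our own helper name and simplified copy-length logic (byte counts assume 64-bit longs).

    #include <stdio.h>

    #define KEY_MAX         0x2ff   /* highest key code of that era */
    #define OLD_KEY_MAX     0x1ff   /* value buggy callers pass as the size */

    #define BITS_TO_LONGS(n) (((n) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

    /* How many bytes to copy for an EVIOCGBIT(EV_KEY, ...) request: if the
     * requested size looks like the old bit count, report only OLD_KEY_MAX
     * bits so the copy fits the caller's small buffer. */
    static size_t eviocgbit_len(size_t requested_bytes)
    {
            size_t bits = KEY_MAX;
            size_t bitmap_bytes;

            if (requested_bytes == OLD_KEY_MAX)
                    bits = OLD_KEY_MAX;

            bitmap_bytes = BITS_TO_LONGS(bits) * sizeof(long);
            return requested_bytes < bitmap_bytes ? requested_bytes : bitmap_bytes;
    }

    int main(void)
    {
            printf("buggy caller   (size 0x1ff): copy %zu bytes\n",
                   eviocgbit_len(OLD_KEY_MAX));      /* 64 on 64-bit longs */
            printf("correct caller (size 96):    copy %zu bytes\n",
                   eviocgbit_len(96));               /* full 96-byte bitmap */
            return 0;
    }
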
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 87d3e7eabffd..6791be81eb29 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -127,6 +127,7 @@ static const struct xpad_device {
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
+ { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", MAP_DPAD_TO_AXES, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", MAP_DPAD_TO_AXES, XTYPE_XBOX },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index be58730e636a..3f48279f2195 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -118,6 +118,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
unsigned int type = button->type ?: EV_KEY;
bdata->input = input;
+ bdata->button = button;
setup_timer(&bdata->timer,
gpio_check_button, (unsigned long)bdata);
@@ -256,7 +257,7 @@ static int gpio_keys_resume(struct platform_device *pdev)
#define gpio_keys_resume NULL
#endif
-struct platform_driver gpio_keys_device_driver = {
+static struct platform_driver gpio_keys_device_driver = {
.probe = gpio_keys_probe,
.remove = __devexit_p(gpio_keys_remove),
.suspend = gpio_keys_suspend,
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 7bbea097cda2..f996546fc443 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -130,6 +130,29 @@ config MOUSE_APPLETOUCH
To compile this driver as a module, choose M here: the
module will be called appletouch.
+config MOUSE_BCM5974
+ tristate "Apple USB BCM5974 Multitouch trackpad support"
+ depends on USB_ARCH_HAS_HCD
+ select USB
+ help
+ Say Y here if you have an Apple USB BCM5974 Multitouch
+ trackpad.
+
+ The BCM5974 is the multitouch trackpad found in the Macbook
+ Air (JAN2008) and Macbook Pro Penryn (FEB2008) laptops.
+
+	  It is also found in the iPhone (2007) and iPod Touch (2008).
+
+ This driver provides multitouch functionality together with
+ the synaptics X11 driver.
+
+	  The interface is currently identical to the appletouch interface;
+	  for further information, see
+ <file:Documentation/input/appletouch.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm5974.
+
config MOUSE_INPORT
tristate "InPort/MS/ATIXL busmouse"
depends on ISA
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 9e6e36330820..d4d202516090 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
+obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
new file mode 100644
index 000000000000..2ec921bf3c60
--- /dev/null
+++ b/drivers/input/mouse/bcm5974.c
@@ -0,0 +1,681 @@
+/*
+ * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
+ *
+ * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
+ *
+ * The USB initialization and package decoding was made by
+ * Scott Shawcroft as part of the touchd user-space driver project:
+ * Copyright (C) 2008 Scott Shawcroft (scott.shawcroft@gmail.com)
+ *
+ * The BCM5974 driver is based on the appletouch driver:
+ * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2005 Johannes Berg (johannes@sipsolutions.net)
+ * Copyright (C) 2005 Stelian Pop (stelian@popies.net)
+ * Copyright (C) 2005 Frank Arnold (frank@scirocco-5v-turbo.de)
+ * Copyright (C) 2005 Peter Osterlund (petero2@telia.com)
+ * Copyright (C) 2005 Michael Hanselmann (linux-kernel@hansmi.ch)
+ * Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/usb/input.h>
+#include <linux/hid.h>
+#include <linux/mutex.h>
+
+#define USB_VENDOR_ID_APPLE 0x05ac
+
+/* MacbookAir, aka wellspring */
+#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
+#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
+#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
+/* MacbookProPenryn, aka wellspring2 */
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
+
+#define BCM5974_DEVICE(prod) { \
+ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_CLASS | \
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL), \
+ .idVendor = USB_VENDOR_ID_APPLE, \
+ .idProduct = (prod), \
+ .bInterfaceClass = USB_INTERFACE_CLASS_HID, \
+ .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE \
+}
+
+/* table of devices that work with this driver */
+static const struct usb_device_id bcm5974_table [] = {
+ /* MacbookAir1.1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_JIS),
+ /* MacbookProPenryn */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS),
+ /* Terminating entry */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, bcm5974_table);
+
+MODULE_AUTHOR("Henrik Rydberg");
+MODULE_DESCRIPTION("Apple USB BCM5974 multitouch driver");
+MODULE_LICENSE("GPL");
+
+#define dprintk(level, format, a...)\
+ { if (debug >= level) printk(KERN_DEBUG format, ##a); }
+
+static int debug = 1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Activate debugging output");
+
+/* button data structure */
+struct bt_data {
+ u8 unknown1; /* constant */
+ u8 button; /* left button */
+ u8 rel_x; /* relative x coordinate */
+ u8 rel_y; /* relative y coordinate */
+};
+
+/* trackpad header structure */
+struct tp_header {
+ u8 unknown1[16]; /* constants, timers, etc */
+ u8 fingers; /* number of fingers on trackpad */
+ u8 unknown2[9]; /* constants, timers, etc */
+};
+
+/* trackpad finger structure */
+struct tp_finger {
+ __le16 origin; /* left/right origin? */
+	__le16 abs_x;		/* absolute x coordinate */
+	__le16 abs_y;		/* absolute y coordinate */
+	__le16 rel_x;		/* relative x coordinate */
+	__le16 rel_y;		/* relative y coordinate */
+ __le16 size_major; /* finger size, major axis? */
+ __le16 size_minor; /* finger size, minor axis? */
+ __le16 orientation; /* 16384 when point, else 15 bit angle */
+ __le16 force_major; /* trackpad force, major axis? */
+ __le16 force_minor; /* trackpad force, minor axis? */
+ __le16 unused[3]; /* zeros */
+ __le16 multi; /* one finger: varies, more fingers: constant */
+};
+
+/* trackpad data structure, empirically at least ten fingers */
+struct tp_data {
+ struct tp_header header;
+ struct tp_finger finger[16];
+};
+
+/* device-specific parameters */
+struct bcm5974_param {
+ int dim; /* logical dimension */
+ int fuzz; /* logical noise value */
+ int devmin; /* device minimum reading */
+ int devmax; /* device maximum reading */
+};
+
+/* device-specific configuration */
+struct bcm5974_config {
+ int ansi, iso, jis; /* the product id of this device */
+ int bt_ep; /* the endpoint of the button interface */
+ int bt_datalen; /* data length of the button interface */
+ int tp_ep; /* the endpoint of the trackpad interface */
+ int tp_datalen; /* data length of the trackpad interface */
+ struct bcm5974_param p; /* finger pressure limits */
+ struct bcm5974_param w; /* finger width limits */
+ struct bcm5974_param x; /* horizontal limits */
+ struct bcm5974_param y; /* vertical limits */
+};
+
+/* logical device structure */
+struct bcm5974 {
+ char phys[64];
+ struct usb_device *udev; /* usb device */
+ struct usb_interface *intf; /* our interface */
+ struct input_dev *input; /* input dev */
+ struct bcm5974_config cfg; /* device configuration */
+ struct mutex pm_mutex; /* serialize access to open/suspend */
+ int opened; /* 1: opened, 0: closed */
+ struct urb *bt_urb; /* button usb request block */
+ struct bt_data *bt_data; /* button transferred data */
+ struct urb *tp_urb; /* trackpad usb request block */
+ struct tp_data *tp_data; /* trackpad transferred data */
+};
+
+/* logical dimensions */
+#define DIM_PRESSURE 256 /* maximum finger pressure */
+#define DIM_WIDTH 16 /* maximum finger width */
+#define DIM_X 1280 /* maximum trackpad x value */
+#define DIM_Y 800 /* maximum trackpad y value */
+
+/* logical signal quality */
+#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
+#define SN_WIDTH 100 /* width signal-to-noise ratio */
+#define SN_COORD 250 /* coordinate signal-to-noise ratio */
+
+/* device constants */
+static const struct bcm5974_config bcm5974_config_table[] = {
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
+ 0x84, sizeof(struct bt_data),
+ 0x81, sizeof(struct tp_data),
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4824, 5342 },
+ { DIM_Y, DIM_Y / SN_COORD, -172, 5820 }
+ },
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING2_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
+ 0x84, sizeof(struct bt_data),
+ 0x81, sizeof(struct tp_data),
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4824, 4824 },
+ { DIM_Y, DIM_Y / SN_COORD, -172, 4290 }
+ },
+ {}
+};
+
+/* return the device-specific configuration by device */
+static const struct bcm5974_config *bcm5974_get_config(struct usb_device *udev)
+{
+ u16 id = le16_to_cpu(udev->descriptor.idProduct);
+ const struct bcm5974_config *cfg;
+
+ for (cfg = bcm5974_config_table; cfg->ansi; ++cfg)
+ if (cfg->ansi == id || cfg->iso == id || cfg->jis == id)
+ return cfg;
+
+ return bcm5974_config_table;
+}
+
+/* convert 16-bit little endian to signed integer */
+static inline int raw2int(__le16 x)
+{
+ return (signed short)le16_to_cpu(x);
+}
+
+/* scale device data to logical dimensions (asserts devmin < devmax) */
+static inline int int2scale(const struct bcm5974_param *p, int x)
+{
+ return x * p->dim / (p->devmax - p->devmin);
+}
+
+/* all logical value ranges are [0,dim). */
+static inline int int2bound(const struct bcm5974_param *p, int x)
+{
+ int s = int2scale(p, x);
+
+ return clamp_val(s, 0, p->dim - 1);
+}
+
+/* setup which logical events to report */
+static void setup_events_to_report(struct input_dev *input_dev,
+ const struct bcm5974_config *cfg)
+{
+ __set_bit(EV_ABS, input_dev->evbit);
+
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, cfg->p.dim, cfg->p.fuzz, 0);
+ input_set_abs_params(input_dev, ABS_TOOL_WIDTH,
+ 0, cfg->w.dim, cfg->w.fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X,
+ 0, cfg->x.dim, cfg->x.fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ 0, cfg->y.dim, cfg->y.fuzz, 0);
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+}
+
+/* report button data as logical button state */
+static int report_bt_state(struct bcm5974 *dev, int size)
+{
+ if (size != sizeof(struct bt_data))
+ return -EIO;
+
+ input_report_key(dev->input, BTN_LEFT, dev->bt_data->button);
+ input_sync(dev->input);
+
+ return 0;
+}
+
+/* report trackpad data as logical trackpad state */
+static int report_tp_state(struct bcm5974 *dev, int size)
+{
+ const struct bcm5974_config *c = &dev->cfg;
+ const struct tp_finger *f = dev->tp_data->finger;
+ struct input_dev *input = dev->input;
+ const int fingers = (size - 26) / 28;
+ int p = 0, w, x, y, n = 0;
+
+ if (size < 26 || (size - 26) % 28 != 0)
+ return -EIO;
+
+ if (fingers) {
+ p = raw2int(f->force_major);
+ w = raw2int(f->size_major);
+ x = raw2int(f->abs_x);
+ y = raw2int(f->abs_y);
+ n = p > 0 ? fingers : 0;
+
+ dprintk(9,
+ "bcm5974: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n",
+ p, w, x, y, n);
+
+ input_report_abs(input, ABS_TOOL_WIDTH, int2bound(&c->w, w));
+ input_report_abs(input, ABS_X, int2bound(&c->x, x - c->x.devmin));
+ input_report_abs(input, ABS_Y, int2bound(&c->y, c->y.devmax - y));
+ }
+
+ input_report_abs(input, ABS_PRESSURE, int2bound(&c->p, p));
+
+ input_report_key(input, BTN_TOOL_FINGER, n == 1);
+ input_report_key(input, BTN_TOOL_DOUBLETAP, n == 2);
+ input_report_key(input, BTN_TOOL_TRIPLETAP, n > 2);
+
+ input_sync(input);
+
+ return 0;
+}
+
+/* Wellspring initialization constants */
+#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
+#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
+#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
+#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
+#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
+
+static int bcm5974_wellspring_mode(struct bcm5974 *dev)
+{
+ char *data = kmalloc(8, GFP_KERNEL);
+ int retval = 0, size;
+
+ if (!data) {
+ err("bcm5974: out of memory");
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ /* read configuration */
+ size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
+ BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+
+ if (size != 8) {
+ err("bcm5974: could not read from device");
+ retval = -EIO;
+ goto out;
+ }
+
+ /* apply the mode switch */
+ data[0] = BCM5974_WELLSPRING_MODE_VENDOR_VALUE;
+
+ /* write configuration */
+ size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
+ BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+
+ if (size != 8) {
+ err("bcm5974: could not write to device");
+ retval = -EIO;
+ goto out;
+ }
+
+ dprintk(2, "bcm5974: switched to wellspring mode.\n");
+
+ out:
+ kfree(data);
+ return retval;
+}
+
+static void bcm5974_irq_button(struct urb *urb)
+{
+ struct bcm5974 *dev = urb->context;
+ int error;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -EOVERFLOW:
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ dbg("bcm5974: button urb shutting down: %d", urb->status);
+ return;
+ default:
+ dbg("bcm5974: button urb status: %d", urb->status);
+ goto exit;
+ }
+
+ if (report_bt_state(dev, dev->bt_urb->actual_length))
+ dprintk(1, "bcm5974: bad button package, length: %d\n",
+ dev->bt_urb->actual_length);
+
+exit:
+ error = usb_submit_urb(dev->bt_urb, GFP_ATOMIC);
+ if (error)
+ err("bcm5974: button urb failed: %d", error);
+}
+
+static void bcm5974_irq_trackpad(struct urb *urb)
+{
+ struct bcm5974 *dev = urb->context;
+ int error;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -EOVERFLOW:
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ dbg("bcm5974: trackpad urb shutting down: %d", urb->status);
+ return;
+ default:
+ dbg("bcm5974: trackpad urb status: %d", urb->status);
+ goto exit;
+ }
+
+ /* control response ignored */
+ if (dev->tp_urb->actual_length == 2)
+ goto exit;
+
+ if (report_tp_state(dev, dev->tp_urb->actual_length))
+ dprintk(1, "bcm5974: bad trackpad package, length: %d\n",
+ dev->tp_urb->actual_length);
+
+exit:
+ error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC);
+ if (error)
+ err("bcm5974: trackpad urb failed: %d", error);
+}
+
+/*
+ * The Wellspring trackpad, like many recent Apple trackpads, shares
+ * the USB device with the keyboard. Since keyboards are usually
+ * handled by the HID system, the device ends up being handled by two
+ * modules. Setting up the device therefore becomes slightly
+ * complicated. To enable multitouch features, a mode switch is
+ * required, which is usually applied via the control interface of the
+ * device. It can be argued where this switch should take place. In
+ * some drivers, like appletouch, the switch is made during
+ * probe. However, the hid module may also alter the state of the
+ * device, resulting in trackpad malfunction under certain
+ * circumstances. To get around this problem, there is at least one
+ * example that utilizes the USB_QUIRK_RESET_RESUME quirk in order to
+ * receive a reset_resume request rather than the normal resume.
+ * Since the implementation of reset_resume is equal to mode switch
+ * plus start_traffic, it seems easier to always do the switch when
+ * starting traffic on the device.
+ */
+static int bcm5974_start_traffic(struct bcm5974 *dev)
+{
+ if (bcm5974_wellspring_mode(dev)) {
+ dprintk(1, "bcm5974: mode switch failed\n");
+ goto error;
+ }
+
+ if (usb_submit_urb(dev->bt_urb, GFP_KERNEL))
+ goto error;
+
+ if (usb_submit_urb(dev->tp_urb, GFP_KERNEL))
+ goto err_kill_bt;
+
+ return 0;
+
+err_kill_bt:
+ usb_kill_urb(dev->bt_urb);
+error:
+ return -EIO;
+}
+
+static void bcm5974_pause_traffic(struct bcm5974 *dev)
+{
+ usb_kill_urb(dev->tp_urb);
+ usb_kill_urb(dev->bt_urb);
+}
+
+/*
+ * The code below implements open/close and manual suspend/resume.
+ * All functions may be called in random order.
+ *
+ * Opening a suspended device fails with EACCES - permission denied.
+ *
+ * Failing a resume leaves the device resumed but closed.
+ */
+static int bcm5974_open(struct input_dev *input)
+{
+ struct bcm5974 *dev = input_get_drvdata(input);
+ int error;
+
+ error = usb_autopm_get_interface(dev->intf);
+ if (error)
+ return error;
+
+ mutex_lock(&dev->pm_mutex);
+
+ error = bcm5974_start_traffic(dev);
+ if (!error)
+ dev->opened = 1;
+
+ mutex_unlock(&dev->pm_mutex);
+
+ if (error)
+ usb_autopm_put_interface(dev->intf);
+
+ return error;
+}
+
+static void bcm5974_close(struct input_dev *input)
+{
+ struct bcm5974 *dev = input_get_drvdata(input);
+
+ mutex_lock(&dev->pm_mutex);
+
+ bcm5974_pause_traffic(dev);
+ dev->opened = 0;
+
+ mutex_unlock(&dev->pm_mutex);
+
+ usb_autopm_put_interface(dev->intf);
+}
+
+static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message)
+{
+ struct bcm5974 *dev = usb_get_intfdata(iface);
+
+ mutex_lock(&dev->pm_mutex);
+
+ if (dev->opened)
+ bcm5974_pause_traffic(dev);
+
+ mutex_unlock(&dev->pm_mutex);
+
+ return 0;
+}
+
+static int bcm5974_resume(struct usb_interface *iface)
+{
+ struct bcm5974 *dev = usb_get_intfdata(iface);
+ int error = 0;
+
+ mutex_lock(&dev->pm_mutex);
+
+ if (dev->opened)
+ error = bcm5974_start_traffic(dev);
+
+ mutex_unlock(&dev->pm_mutex);
+
+ return error;
+}
+
+static int bcm5974_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(iface);
+ const struct bcm5974_config *cfg;
+ struct bcm5974 *dev;
+ struct input_dev *input_dev;
+ int error = -ENOMEM;
+
+ /* find the product index */
+ cfg = bcm5974_get_config(udev);
+
+ /* allocate memory for our device state and initialize it */
+ dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!dev || !input_dev) {
+ err("bcm5974: out of memory");
+ goto err_free_devs;
+ }
+
+ dev->udev = udev;
+ dev->intf = iface;
+ dev->input = input_dev;
+ dev->cfg = *cfg;
+ mutex_init(&dev->pm_mutex);
+
+ /* setup urbs */
+ dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->bt_urb)
+ goto err_free_devs;
+
+ dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->tp_urb)
+ goto err_free_bt_urb;
+
+ dev->bt_data = usb_buffer_alloc(dev->udev,
+ dev->cfg.bt_datalen, GFP_KERNEL,
+ &dev->bt_urb->transfer_dma);
+ if (!dev->bt_data)
+ goto err_free_urb;
+
+ dev->tp_data = usb_buffer_alloc(dev->udev,
+ dev->cfg.tp_datalen, GFP_KERNEL,
+ &dev->tp_urb->transfer_dma);
+ if (!dev->tp_data)
+ goto err_free_bt_buffer;
+
+ usb_fill_int_urb(dev->bt_urb, udev,
+ usb_rcvintpipe(udev, cfg->bt_ep),
+ dev->bt_data, dev->cfg.bt_datalen,
+ bcm5974_irq_button, dev, 1);
+
+ usb_fill_int_urb(dev->tp_urb, udev,
+ usb_rcvintpipe(udev, cfg->tp_ep),
+ dev->tp_data, dev->cfg.tp_datalen,
+ bcm5974_irq_trackpad, dev, 1);
+
+ /* create bcm5974 device */
+ usb_make_path(udev, dev->phys, sizeof(dev->phys));
+ strlcat(dev->phys, "/input0", sizeof(dev->phys));
+
+ input_dev->name = "bcm5974";
+ input_dev->phys = dev->phys;
+ usb_to_input_id(dev->udev, &input_dev->id);
+ input_dev->dev.parent = &iface->dev;
+
+ input_set_drvdata(input_dev, dev);
+
+ input_dev->open = bcm5974_open;
+ input_dev->close = bcm5974_close;
+
+ setup_events_to_report(input_dev, cfg);
+
+ error = input_register_device(dev->input);
+ if (error)
+ goto err_free_buffer;
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(iface, dev);
+
+ return 0;
+
+err_free_buffer:
+ usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
+ dev->tp_data, dev->tp_urb->transfer_dma);
+err_free_bt_buffer:
+ usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
+ dev->bt_data, dev->bt_urb->transfer_dma);
+err_free_urb:
+ usb_free_urb(dev->tp_urb);
+err_free_bt_urb:
+ usb_free_urb(dev->bt_urb);
+err_free_devs:
+ usb_set_intfdata(iface, NULL);
+ input_free_device(input_dev);
+ kfree(dev);
+ return error;
+}
+
+static void bcm5974_disconnect(struct usb_interface *iface)
+{
+ struct bcm5974 *dev = usb_get_intfdata(iface);
+
+ usb_set_intfdata(iface, NULL);
+
+ input_unregister_device(dev->input);
+ usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
+ dev->tp_data, dev->tp_urb->transfer_dma);
+ usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
+ dev->bt_data, dev->bt_urb->transfer_dma);
+ usb_free_urb(dev->tp_urb);
+ usb_free_urb(dev->bt_urb);
+ kfree(dev);
+}
+
+static struct usb_driver bcm5974_driver = {
+ .name = "bcm5974",
+ .probe = bcm5974_probe,
+ .disconnect = bcm5974_disconnect,
+ .suspend = bcm5974_suspend,
+ .resume = bcm5974_resume,
+ .reset_resume = bcm5974_resume,
+ .id_table = bcm5974_table,
+ .supports_autosuspend = 1,
+};
+
+static int __init bcm5974_init(void)
+{
+ return usb_register(&bcm5974_driver);
+}
+
+static void __exit bcm5974_exit(void)
+{
+ usb_deregister(&bcm5974_driver);
+}
+
+module_init(bcm5974_init);
+module_exit(bcm5974_exit);
+
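
The new driver's int2scale()/int2bound() map raw trackpad readings onto fixed logical ranges and clamp them into [0, dim). The sketch below replays that conversion with the Wellspring X-axis limits from the config table above (-4824..5342 mapped onto 0..1279); the struct and helper names here are ours, and the plain clamp() stands in for the kernel's clamp_val().

    #include <stdio.h>

    struct param {
            int dim;        /* logical dimension, values end up in [0, dim) */
            int devmin;     /* device minimum reading */
            int devmax;     /* device maximum reading */
    };

    static int clamp(int v, int lo, int hi)
    {
            return v < lo ? lo : v > hi ? hi : v;
    }

    /* Scale a raw reading into the logical range and clamp, mirroring the
     * int2scale()/int2bound() pair in the driver above. */
    static int to_logical(const struct param *p, int raw)
    {
            int scaled = (raw - p->devmin) * p->dim / (p->devmax - p->devmin);

            return clamp(scaled, 0, p->dim - 1);
    }

    int main(void)
    {
            /* Wellspring (MacBook Air) X axis from the config table above. */
            struct param x = { 1280, -4824, 5342 };

            printf("raw -4824 -> %d\n", to_logical(&x, -4824));  /* left edge: 0 */
            printf("raw     0 -> %d\n", to_logical(&x, 0));      /* roughly 607 */
            printf("raw  5342 -> %d\n", to_logical(&x, 5342));   /* clamped to 1279 */
            return 0;
    }
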
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 66bafe308b0c..692a79ec2a22 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -1,10 +1,11 @@
#ifndef _I8042_SPARCIO_H
#define _I8042_SPARCIO_H
+#include <linux/of_device.h>
+
#include <asm/io.h>
#include <asm/oplib.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
static int i8042_kbd_irq = -1;
static int i8042_aux_irq = -1;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index fe732a574ec2..3282b741e246 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -394,6 +394,13 @@ static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
},
},
+ {
+ .ident = "Acer TravelMate 4280",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 0ed044d5e685..765007899d9a 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -269,8 +269,8 @@ static int xps2_setup(struct device *dev, struct resource *regs_res,
* we have the PS2 in a good state */
out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET);
- dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%08X, irq=%d\n",
- drvdata->phys_addr, (u32)drvdata->base_address, drvdata->irq);
+ dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%p, irq=%d\n",
+ drvdata->phys_addr, drvdata->base_address, drvdata->irq);
serio = &drvdata->serio;
serio->id.type = SERIO_8042;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 6e60a97a234c..25287e80e236 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -249,29 +249,26 @@ config TOUCHSCREEN_WM97XX
config TOUCHSCREEN_WM9705
bool "WM9705 Touchscreen interface support"
depends on TOUCHSCREEN_WM97XX
+ default y
help
- Say Y here if you have a Wolfson Microelectronics WM9705
- touchscreen controller connected to your system.
-
- If unsure, say N.
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9705 touchscreen controller.
config TOUCHSCREEN_WM9712
bool "WM9712 Touchscreen interface support"
depends on TOUCHSCREEN_WM97XX
+ default y
help
- Say Y here if you have a Wolfson Microelectronics WM9712
- touchscreen controller connected to your system.
-
- If unsure, say N.
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9712 touchscreen controller.
config TOUCHSCREEN_WM9713
bool "WM9713 Touchscreen interface support"
depends on TOUCHSCREEN_WM97XX
+ default y
help
- Say Y here if you have a Wolfson Microelectronics WM9713 touchscreen
- controller connected to your system.
-
- If unsure, say N.
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9713 touchscreen controller.
config TOUCHSCREEN_WM97XX_MAINSTONE
tristate "WM97xx Mainstone accelerated touch"
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index d93500f24fbb..81d0c6053447 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -108,9 +108,8 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
}
/*:*/
-/*M:014 get_pfn is slow; it takes the mmap sem and calls get_user_pages. We
- * could probably try to grab batches of pages here as an optimization
- * (ie. pre-faulting). :*/
+/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
+ * an optimization (ie. pre-faulting). :*/
/*H:350 This routine takes a page number given by the Guest and converts it to
* an actual, physical page number. It can fail for several reasons: the
@@ -123,19 +122,13 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
struct page *page;
- /* This value indicates failure. */
- unsigned long ret = -1UL;
- /* get_user_pages() is a complex interface: it gets the "struct
- * vm_area_struct" and "struct page" assocated with a range of pages.
- * It also needs the task's mmap_sem held, and is not very quick.
- * It returns the number of pages it got. */
- down_read(&current->mm->mmap_sem);
- if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
- 1, write, 1, &page, NULL) == 1)
- ret = page_to_pfn(page);
- up_read(&current->mm->mmap_sem);
- return ret;
+ /* gup me one page at this address please! */
+ if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
+ return page_to_pfn(page);
+
+ /* This value indicates failure. */
+ return -1UL;
}
/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
@@ -174,7 +167,7 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
- /* Remember that get_user_pages() took a reference to the page, in
+ /* Remember that get_user_pages_fast() took a reference to the page, in
* get_pfn()? We have to put it back now. */
if (pte_flags(pte) & _PAGE_PRESENT)
put_page(pfn_to_page(pte_pfn(pte)));
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c7aae66c6f9b..8cfadc5bd2ba 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2393,6 +2393,8 @@ static void analyze_sbs(mddev_t * mddev)
}
+static void md_safemode_timeout(unsigned long data);
+
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
@@ -2432,9 +2434,12 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
if (msec == 0)
mddev->safemode_delay = 0;
else {
+ unsigned long old_delay = mddev->safemode_delay;
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
+ if (mddev->safemode_delay < old_delay)
+ md_safemode_timeout((unsigned long)mddev);
}
return len;
}
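
safe_delay_store() above converts the requested delay from milliseconds to timer ticks, never letting a non-zero request round down to zero, and now fires the safemode timeout immediately when the new delay is shorter than the old one. A standalone sketch of that conversion and decision; HZ=250 is an assumed tick rate for the example, and the zero/disabled case handled separately in the driver is omitted.

    #include <stdio.h>

    #define HZ 250  /* assumed tick rate for this example */

    /* Convert a safemode delay in milliseconds to timer ticks, never rounding
     * a non-zero request down to zero. */
    static unsigned long msec_to_ticks(unsigned long msec)
    {
            unsigned long ticks = msec * HZ / 1000;

            return ticks ? ticks : 1;
    }

    int main(void)
    {
            unsigned long old_delay = msec_to_ticks(200);   /* 50 ticks */
            unsigned long new_delay = msec_to_ticks(1);     /* rounds to 1, not 0 */

            printf("old=%lu ticks, new=%lu ticks\n", old_delay, new_delay);

            /* As in the hunk above: shortening the delay fires the safemode
             * timeout right away instead of waiting out the old, longer period. */
            if (new_delay < old_delay)
                    printf("new delay is shorter: run the safemode timeout now\n");
            return 0;
    }
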
@@ -4634,6 +4639,11 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
*/
if (mddev->sync_thread)
return -EBUSY;
+ if (mddev->bitmap)
+ /* Sorry, cannot grow a bitmap yet, just remove it,
+ * grow, and re-add.
+ */
+ return -EBUSY;
rdev_for_each(rdev, tmp, mddev) {
sector_t avail;
avail = rdev->size * 2;
@@ -5993,7 +6003,7 @@ static int remove_and_add_spares(mddev_t *mddev)
}
}
- if (mddev->degraded) {
+ if (mddev->degraded && ! mddev->ro) {
rdev_for_each(rdev, rtmp, mddev) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
@@ -6067,6 +6077,8 @@ void md_check_recovery(mddev_t *mddev)
flush_signals(current);
}
+ if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
if ( ! (
(mddev->flags && !mddev->external) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
@@ -6080,6 +6092,15 @@ void md_check_recovery(mddev_t *mddev)
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (mddev->ro) {
+ /* Only thing we do on a ro array is remove
+ * failed devices.
+ */
+ remove_and_add_spares(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ goto unlock;
+ }
+
if (!mddev->external) {
int did_change = 0;
spin_lock_irq(&mddev->write_lock);
@@ -6117,7 +6138,8 @@ void md_check_recovery(mddev_t *mddev)
/* resync has finished, collect result */
md_unregister_thread(mddev->sync_thread);
mddev->sync_thread = NULL;
- if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev))
@@ -6169,6 +6191,7 @@ void md_check_recovery(mddev_t *mddev)
} else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -6232,7 +6255,11 @@ static int md_notify_reboot(struct notifier_block *this,
for_each_mddev(mddev, tmp)
if (mddev_trylock(mddev)) {
- do_md_stop (mddev, 1, 0);
+			/* Force a switch to readonly even if the array
+			 * appears to still be in use.  Hence
+			 * the '100'.
+ */
+ do_md_stop (mddev, 1, 100);
mddev_unlock(mddev);
}
/*
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d41bebb6da0f..e34cd0e62473 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -76,11 +76,13 @@ static void r10bio_pool_free(void *r10_bio, void *data)
kfree(r10_bio);
}
+/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
-#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+/* amount of memory to reserve for resync requests */
+#define RESYNC_WINDOW (1024*1024)
+/* maximum number of concurrent requests, memory permitting */
+#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
/*
* When performing a resync, we need to read and compare, so
@@ -690,7 +692,6 @@ static int flush_pending_writes(conf_t *conf)
* there is no normal IO happeing. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-#define RESYNC_DEPTH 32
static void raise_barrier(conf_t *conf, int force)
{
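
With the definitions above, the resync depth is now derived from a memory budget rather than hard-coded; a quick standalone check of the arithmetic, using the same constants:

    #include <stdio.h>

    #define RESYNC_BLOCK_SIZE (64*1024)
    #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)

    int main(void)
    {
            /* 32 MB budget / 64 KB per request = 512 concurrent resync requests */
            printf("RESYNC_DEPTH = %d\n", RESYNC_DEPTH);
            return 0;
    }
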
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 40e939675657..224de022e7c5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2568,10 +2568,10 @@ static bool handle_stripe5(struct stripe_head *sh)
if (dev->written)
s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ if (blocked_rdev == NULL &&
+ rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
blocked_rdev = rdev;
atomic_inc(&rdev->nr_pending);
- break;
}
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -2588,8 +2588,14 @@ static bool handle_stripe5(struct stripe_head *sh)
rcu_read_unlock();
if (unlikely(blocked_rdev)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ if (s.syncing || s.expanding || s.expanded ||
+ s.to_write || s.written) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto unlock;
+ }
+ /* There is nothing for the blocked_rdev to block */
+ rdev_dec_pending(blocked_rdev, conf->mddev);
+ blocked_rdev = NULL;
}
if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -2832,10 +2838,10 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
if (dev->written)
s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ if (blocked_rdev == NULL &&
+ rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
blocked_rdev = rdev;
atomic_inc(&rdev->nr_pending);
- break;
}
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -2853,9 +2859,16 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
rcu_read_unlock();
if (unlikely(blocked_rdev)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ if (s.syncing || s.expanding || s.expanded ||
+ s.to_write || s.written) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto unlock;
+ }
+ /* There is nothing for the blocked_rdev to block */
+ rdev_dec_pending(blocked_rdev, conf->mddev);
+ blocked_rdev = NULL;
}
+
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -4446,6 +4459,9 @@ static int raid5_check_reshape(mddev_t *mddev)
return -EINVAL; /* Cannot shrink array or change level yet */
if (mddev->delta_disks == 0)
return 0; /* nothing to do */
+ if (mddev->bitmap)
+ /* Cannot grow a bitmap yet */
+ return -EBUSY;
/* Can only proceed if there are plenty of stripe_heads.
 * We need a minimum of one full stripe, and for sensible progress
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index e7a3fe508dff..b2d9878dc3f0 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -803,11 +803,30 @@ static acpi_status get_u32(u32 *value, u32 cap)
static acpi_status set_u32(u32 value, u32 cap)
{
+ acpi_status status;
+
if (interface->capability & cap) {
switch (interface->type) {
case ACER_AMW0:
return AMW0_set_u32(value, cap, interface);
case ACER_AMW0_V2:
+ if (cap == ACER_CAP_MAILLED)
+ return AMW0_set_u32(value, cap, interface);
+
+ /*
+ * On some models, some WMID methods don't toggle
+ * properly. For those cases, we want to run the AMW0
+ * method afterwards to be certain we've really toggled
+ * the device state.
+ */
+ if (cap == ACER_CAP_WIRELESS ||
+ cap == ACER_CAP_BLUETOOTH) {
+ status = WMID_set_u32(value, cap, interface);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ return AMW0_set_u32(value, cap, interface);
+ }
case ACER_WMID:
return WMID_set_u32(value, cap, interface);
default:
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 4251018f70ff..a78f70deeb59 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -279,7 +279,7 @@ struct gru_stats_s {
#if defined CONFIG_IA64
#define VADDR_HI_BIT 64
#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
-#elif defined __x86_64
+#elif defined CONFIG_X86_64
#define VADDR_HI_BIT 48
#define GRUREGION(addr) (0) /* ZZZ could do better */
#else
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 7c994e1ae276..ae16d845d746 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -595,8 +595,9 @@ static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id,
- int size, enum s3c2410_dma_buffresult result)
+static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch,
+ void *buf_id, int size,
+ enum s3c2410_dma_buffresult result)
{
struct s3cmci_host *host = buf_id;
unsigned long iflags;
@@ -740,8 +741,8 @@ request_done:
mmc_request_done(host->mmc, mrq);
}
-
-void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source)
+static void s3cmci_dma_setup(struct s3cmci_host *host,
+ enum s3c2410_dmasrc source)
{
static enum s3c2410_dmasrc last_source = -1;
static int setup_ok;
@@ -1003,8 +1004,9 @@ static void s3cmci_send_request(struct mmc_host *mmc)
enable_irq(host->irq);
}
-static int s3cmci_card_present(struct s3cmci_host *host)
+static int s3cmci_card_present(struct mmc_host *mmc)
{
+ struct s3cmci_host *host = mmc_priv(mmc);
struct s3c24xx_mci_pdata *pdata = host->pdata;
int ret;
@@ -1023,7 +1025,7 @@ static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->cmd_is_stop = 0;
host->mrq = mrq;
- if (s3cmci_card_present(host) == 0) {
+ if (s3cmci_card_present(mmc) == 0) {
dbg(host, dbg_err, "%s: no medium present\n", __func__);
host->mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
@@ -1138,6 +1140,7 @@ static struct mmc_host_ops s3cmci_ops = {
.request = s3cmci_request,
.set_ios = s3cmci_set_ios,
.get_ro = s3cmci_get_ro,
+ .get_cd = s3cmci_card_present,
};
static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
@@ -1206,7 +1209,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
}
host->base = ioremap(host->mem->start, RESSIZE(host->mem));
- if (host->base == 0) {
+ if (!host->base) {
dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
ret = -EINVAL;
goto probe_free_mem_region;
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index f99e9f721629..1df44d966bdb 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -29,7 +29,6 @@
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/scatterlist.h>
-#include <linux/version.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 64002488c6ee..917cf8d3ae95 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -19,7 +19,7 @@
#include <asm/io.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
-#include <asm/plat-orion/orion_nand.h>
+#include <plat/orion_nand.h>
#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = { "cmdlinepart", NULL };
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4bf4f7b205f2..b468f904c7f8 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -40,20 +40,20 @@
#define DP(__mask, __fmt, __args...) do { \
if (bp->msglevel & (__mask)) \
printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev?(bp->dev->name):"?", ##__args); \
+ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* errors debug print */
#define BNX2X_DBG_ERR(__fmt, __args...) do { \
if (bp->msglevel & NETIF_MSG_PROBE) \
printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev?(bp->dev->name):"?", ##__args); \
+ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* for errors (never masked) */
#define BNX2X_ERR(__fmt, __args...) do { \
printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev?(bp->dev->name):"?", ##__args); \
+ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* before we have a dev->name use dev_info() */
@@ -120,16 +120,8 @@
#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
-#define NIG_WR(reg, val) REG_WR(bp, reg, val)
-#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val)
-#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
-
-
-#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
-
-#define for_each_nondefault_queue(bp, var) \
- for (var = 1; var < bp->num_queues; var++)
-#define is_multi(bp) (bp->num_queues > 1)
+#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
/* fast path */
@@ -163,7 +155,7 @@ struct sw_rx_page {
#define NUM_RX_SGE_PAGES 2
#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
-/* RX_SGE_CNT is promissed to be a power of 2 */
+/* RX_SGE_CNT is promised to be a power of 2 */
#define RX_SGE_MASK (RX_SGE_CNT - 1)
#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
#define MAX_RX_SGE (NUM_RX_SGE - 1)
@@ -258,8 +250,7 @@ struct bnx2x_fastpath {
unsigned long tx_pkt,
rx_pkt,
- rx_calls,
- rx_alloc_failed;
+ rx_calls;
/* TPA related */
struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
@@ -275,6 +266,15 @@ struct bnx2x_fastpath {
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+#define BNX2X_HAS_TX_WORK(fp) \
+ ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
+ (fp->tx_pkt_prod != fp->tx_pkt_cons))
+
+#define BNX2X_HAS_RX_WORK(fp) \
+ (fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
+
+#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
+
/* MC hsi */
#define MAX_FETCH_BD 13 /* HW max BDs per packet */
@@ -317,7 +317,7 @@ struct bnx2x_fastpath {
#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
-/* This is needed for determening of last_max */
+/* This is needed for determining of last_max */
#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
#define __SGE_MASK_SET_BIT(el, bit) \
@@ -386,20 +386,28 @@ struct bnx2x_fastpath {
#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
(TPA_TYPE_START | TPA_TYPE_END))
-#define BNX2X_RX_SUM_OK(cqe) \
- (!(cqe->fast_path_cqe.status_flags & \
- (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))
+#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_IP_CSUM_ERR(cqe) \
+ (!((cqe)->fast_path_cqe.status_flags & \
+ ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+ ((cqe)->fast_path_cqe.type_error_flags & \
+ ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+
+#define BNX2X_L4_CSUM_ERR(cqe) \
+ (!((cqe)->fast_path_cqe.status_flags & \
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+ ((cqe)->fast_path_cqe.type_error_flags & \
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+
+#define BNX2X_RX_CSUM_OK(cqe) \
+ (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
#define BNX2X_RX_SUM_FIX(cqe) \
((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
(1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
-#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
-
#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
@@ -647,6 +655,8 @@ struct bnx2x_eth_stats {
u32 brb_drop_hi;
u32 brb_drop_lo;
+ u32 brb_truncate_hi;
+ u32 brb_truncate_lo;
u32 jabber_packets_received;
@@ -663,6 +673,9 @@ struct bnx2x_eth_stats {
u32 mac_discard;
u32 driver_xoff;
+ u32 rx_err_discard_pkt;
+ u32 rx_skb_alloc_failed;
+ u32 hw_csum_err;
};
#define STATS_OFFSET32(stat_name) \
@@ -753,7 +766,6 @@ struct bnx2x {
u16 def_att_idx;
u32 attn_state;
struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
- u32 aeu_mask;
u32 nig_mask;
/* slow path ring */
@@ -772,7 +784,7 @@ struct bnx2x {
u8 stats_pending;
u8 set_mac_pending;
- /* End of fileds used in the performance code paths */
+ /* End of fields used in the performance code paths */
int panic;
int msglevel;
@@ -794,9 +806,6 @@ struct bnx2x {
#define BP_FUNC(bp) (bp->func)
#define BP_E1HVN(bp) (bp->func >> 1)
#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
-/* assorted E1HVN */
-#define IS_E1HMF(bp) (bp->e1hmf != 0)
-#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
int pm_cap;
int pcie_cap;
@@ -821,6 +830,7 @@ struct bnx2x {
u32 mf_config;
u16 e1hov;
u8 e1hmf;
+#define IS_E1HMF(bp) (bp->e1hmf != 0)
u8 wol;
@@ -836,7 +846,6 @@ struct bnx2x {
u16 rx_ticks_int;
u16 rx_ticks;
- u32 stats_ticks;
u32 lin_cnt;
int state;
@@ -852,6 +861,7 @@ struct bnx2x {
#define BNX2X_STATE_ERROR 0xf000
int num_queues;
+#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
u32 rx_mode;
#define BNX2X_RX_MODE_NONE 0
@@ -902,10 +912,17 @@ struct bnx2x {
};
+#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
+
+#define for_each_nondefault_queue(bp, var) \
+ for (var = 1; var < bp->num_queues; var++)
+#define is_multi(bp) (bp->num_queues > 1)
+
+
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
@@ -976,7 +993,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PCICFG_LINK_SPEED_SHIFT 16
-#define BNX2X_NUM_STATS 39
+#define BNX2X_NUM_STATS 42
#define BNX2X_NUM_TESTS 8
#define BNX2X_MAC_LOOPBACK 0
@@ -1007,10 +1024,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* resolution of the rate shaping timer - 100 usec */
#define RS_PERIODIC_TIMEOUT_USEC 100
/* resolution of fairness algorithm in usecs -
- coefficient for clauclating the actuall t fair */
+ coefficient for calculating the actual t fair */
#define T_FAIR_COEF 10000000
/* number of bytes in single QM arbitration cycle -
- coeffiecnt for calculating the fairness timer */
+ coefficient for calculating the fairness timer */
#define QM_ARB_BYTES 40000
#define FAIR_MEM 2
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index e3da7f69d27b..192fa981b930 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -9,165 +9,171 @@
#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET? 0x7000 : 0x1000)
+ (IS_E1H_OFFSET ? 0x7000 : 0x1000)
#define CSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+ (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \
- * 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \
- * 0x4)))
+ (IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \
+ ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
+ 0x40) + (index * 0x4)))
#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \
- * 0x100)) : (0x1900 + (function * 0x40)))
+ (IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \
+ ((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \
- * 0x100)) : (0x1908 + (function * 0x40)))
+ (IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \
+ ((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
#define CSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET? 0x11e8 : 0xffffffff)
+ (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
#define CSTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
+ (IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
+ (IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)))
#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
+ (IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)))
#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
+ (IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
+ (IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
#define CSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \
+ (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
(function * 0x8)))
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
- (IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff)
+ (IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff)
#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET? 0xa000 : 0x1000)
+ (IS_E1H_OFFSET ? 0xa000 : 0x1000)
#define TSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+ (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
- (IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \
- (0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
+ (IS_E1H_OFFSET ? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) \
+ : (0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \
- 0x4)))
+ (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
+ 0x28) + (index * 0x4)))
#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1400 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1408 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \
+ (IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \
(function * 0x8)))
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
- (IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \
+ (IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \
(function * 0x38)))
#define TSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET? 0x1ad0 : 0xffffffff)
+ (IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff)
#define TSTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+ (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
- (IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \
+ (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
(function * 0x80)))
#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
- (IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \
+ (IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \
(function * 0x38)))
+#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
+ (IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \
+ 0x50)) : (0x4000 + (port * 0x3f0) + (stats_counter_id * 0x38)))
#define TSTORM_RX_PRODS_OFFSET(port, client_id) \
- (IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \
- (0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
+ (IS_E1H_OFFSET ? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) \
+ : (0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
#define TSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \
+ (IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \
(function * 0x8)))
-#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20)
-#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10)
-#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200)
+#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3b30 : 0x1c20)
+#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10)
+#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200)
#define USTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET? 0x8000 : 0x1000)
+ (IS_E1H_OFFSET ? 0x8000 : 0x1000)
#define USTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+ (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
- (IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
+ (IS_E1H_OFFSET ? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
(0x5450 + (port * 0x1c8) + (clientId * 0x18)))
#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \
- 0x4)))
+ (IS_E1H_OFFSET ? (0x951a + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0) + (index * 0x4)) : (0x191a + (function * \
+ 0x28) + (index * 0x4)))
#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1900 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1900 + (function * 0x28)))
#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1908 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1908 + (function * 0x28)))
#define USTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET? 0x2448 : 0xffffffff)
+ (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
#define USTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
+ (IS_E1H_OFFSET ? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
- (IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
+ (IS_E1H_OFFSET ? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
(0x5448 + (port * 0x1c8) + (clientId * 0x18)))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2408 + (function * 0x8)) : (0x5408 + \
+ (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5408 + \
(function * 0x8)))
#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
+ (IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)))
#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
+ (IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)))
#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
+ (IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
#define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
+ (IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
#define XSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET? 0x9000 : 0x1000)
+ (IS_E1H_OFFSET ? 0x9000 : 0x1000)
#define XSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+ (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
- (IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
+ (IS_E1H_OFFSET ? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \
- 0x4)))
+ (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
+ 0x28) + (index * 0x4)))
#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1400 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1408 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
#define XSTORM_E1HOV_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff)
+ (IS_E1H_OFFSET ? (0x2ab8 + (function * 0x2)) : 0xffffffff)
#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \
+ (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \
(function * 0x8)))
#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \
+ (IS_E1H_OFFSET ? (0x2568 + (function * 0x70)) : (0x3c60 + \
(function * 0x70)))
#define XSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET? 0x2ac8 : 0xffffffff)
+ (IS_E1H_OFFSET ? 0x2ac8 : 0xffffffff)
#define XSTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+ (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
+ (IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \
+ 0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38)))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \
+ (IS_E1H_OFFSET ? (0x2528 + (function * 0x70)) : (0x3c20 + \
(function * 0x70)))
#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \
+ (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
(function * 0x10)))
#define XSTORM_SPQ_PROD_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \
+ (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
(function * 0x10)))
#define XSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \
+ (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \
(function * 0x8)))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index d3e8198d7dba..efd764427fa1 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1268,7 +1268,7 @@ struct doorbell {
/*
- * IGU driver acknowlegement register
+ * IGU driver acknowledgement register
*/
struct igu_ack_register {
#if defined(__BIG_ENDIAN)
@@ -1882,7 +1882,7 @@ struct timers_block_context {
};
/*
- * structure for easy accessability to assembler
+ * structure for easy accessibility to assembler
*/
struct eth_tx_bd_flags {
u8 as_bitfield;
@@ -2044,7 +2044,7 @@ struct eth_context {
/*
- * ethernet doorbell
+ * Ethernet doorbell
*/
struct eth_tx_doorbell {
#if defined(__BIG_ENDIAN)
@@ -2256,7 +2256,7 @@ struct ramrod_data {
};
/*
- * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits)
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
*/
union eth_ramrod_data {
struct ramrod_data general;
@@ -2330,7 +2330,7 @@ struct spe_hdr {
};
/*
- * ethernet slow path element
+ * Ethernet slow path element
*/
union eth_specific_data {
u8 protocol_data[8];
@@ -2343,7 +2343,7 @@ union eth_specific_data {
};
/*
- * ethernet slow path element
+ * Ethernet slow path element
*/
struct eth_spe {
struct spe_hdr hdr;
@@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers {
/*
- * common flag to indicate existance of TPA.
+ * common flag to indicate existence of TPA.
*/
struct tstorm_eth_tpa_exist {
#if defined(__BIG_ENDIAN)
@@ -2765,7 +2765,7 @@ struct tstorm_common_stats {
};
/*
- * Eth statistics query sturcture for the eth_stats_quesry ramrod
+ * Eth statistics query structure for the eth_stats_query ramrod
*/
struct eth_stats_query {
struct xstorm_common_stats xstorm_common;
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 4c7750789b62..130927cfc75b 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -72,26 +72,26 @@
struct raw_op {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
u32 raw_data;
};
struct op_read {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
u32 pad;
};
struct op_write {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
u32 val;
};
struct op_string_write {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
#ifdef __LITTLE_ENDIAN
u16 data_off;
u16 data_len;
@@ -102,8 +102,8 @@ struct op_string_write {
};
struct op_zero {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
u32 len;
};
@@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
/*********************************************************
There are different blobs for each PRAM section.
In addition, each blob write operation is divided into a few operations
- in order to decrease the amount of phys. contigious buffer needed.
+ in order to decrease the amount of phys. contiguous buffer needed.
Thus, when we select a blob the address may be with some offset
from the beginning of PRAM section.
The same holds for the INT_TABLE sections.
@@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
len = op->str_wr.data_len;
data = data_base + op->str_wr.data_off;
- /* carefull! it must be in order */
+ /* careful! it must be in order */
if (unlikely(op_type > OP_WB)) {
/* If E1 only */
@@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc)
return crc_res;
}
-/* regiesers addresses are not in order
+/* registers addresses are not in order
so these arrays help simplify the code */
static const int cm_start[E1H_FUNC_MAX][9] = {
{MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
diff --git a/drivers/net/bnx2x_init_values.h b/drivers/net/bnx2x_init_values.h
index 63019055e4bb..9755bf6b08dd 100644
--- a/drivers/net/bnx2x_init_values.h
+++ b/drivers/net/bnx2x_init_values.h
@@ -901,31 +901,28 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x400},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c00, 0x2},
+ {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x20278},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2c00 + 0x8, 0x20278},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
+ {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027a},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x2027a},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027c},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298},
{OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027e},
+ {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027c},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a},
{OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028e},
+ {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028c},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa},
{OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000},
{OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000},
- {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029e},
+ {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029c},
{OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba},
{OP_ZP_E1, USEM_REG_PRAM, 0x311c0000},
{OP_ZP_E1H, USEM_REG_PRAM, 0x31070000},
@@ -933,11 +930,11 @@ static const struct raw_op init_ops[] = {
{OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42},
{OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919},
{OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906},
- {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x500402a0},
+ {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x5004029e},
{OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d},
{OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc},
-#define USEM_COMMON_END 790
-#define USEM_PORT0_START 790
+#define USEM_COMMON_END 787
+#define USEM_PORT0_START 787
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa},
@@ -950,44 +947,27 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x20},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3100, 0x20},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3200, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3300, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3400, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3500, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3600, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3700, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3800, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3900, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c10, 0x2},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
-#define USEM_PORT0_END 838
-#define USEM_PORT1_START 838
+#define USEM_PORT0_END 818
+#define USEM_PORT1_START 818
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa},
@@ -1000,76 +980,59 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3080, 0x20},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3180, 0x20},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3280, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3380, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3480, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3580, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3680, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3780, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3880, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3980, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c20, 0x2},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
-#define USEM_PORT1_END 886
-#define USEM_FUNC0_START 886
+#define USEM_PORT1_END 849
+#define USEM_FUNC0_START 849
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2},
-#define USEM_FUNC0_END 888
-#define USEM_FUNC1_START 888
+#define USEM_FUNC0_END 851
+#define USEM_FUNC1_START 851
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2},
-#define USEM_FUNC1_END 890
-#define USEM_FUNC2_START 890
+#define USEM_FUNC1_END 853
+#define USEM_FUNC2_START 853
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2},
-#define USEM_FUNC2_END 892
-#define USEM_FUNC3_START 892
+#define USEM_FUNC2_END 855
+#define USEM_FUNC3_START 855
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2},
-#define USEM_FUNC3_END 894
-#define USEM_FUNC4_START 894
+#define USEM_FUNC3_END 857
+#define USEM_FUNC4_START 857
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2},
-#define USEM_FUNC4_END 896
-#define USEM_FUNC5_START 896
+#define USEM_FUNC4_END 859
+#define USEM_FUNC5_START 859
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2},
-#define USEM_FUNC5_END 898
-#define USEM_FUNC6_START 898
+#define USEM_FUNC5_END 861
+#define USEM_FUNC6_START 861
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2},
-#define USEM_FUNC6_END 900
-#define USEM_FUNC7_START 900
+#define USEM_FUNC6_END 863
+#define USEM_FUNC7_START 863
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2},
-#define USEM_FUNC7_END 902
-#define CSEM_COMMON_START 902
+#define USEM_FUNC7_END 865
+#define CSEM_COMMON_START 865
{OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0},
{OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0},
{OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1128,29 +1091,29 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a2},
+ {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240},
{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be},
{OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002aa},
+ {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002a8},
{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de},
{OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ba},
+ {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002b8},
{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee},
{OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000},
{OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000},
- {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002ca},
+ {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002c8},
{OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe},
{OP_ZP_E1, CSEM_REG_PRAM, 0x32580000},
{OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000},
{OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96},
{OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f},
- {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402cc},
+ {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402ca},
{OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300},
-#define CSEM_COMMON_END 981
-#define CSEM_PORT0_START 981
+#define CSEM_COMMON_END 944
+#define CSEM_PORT0_START 944
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10},
@@ -1163,8 +1126,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30},
-#define CSEM_PORT0_END 993
-#define CSEM_PORT1_START 993
+#define CSEM_PORT0_END 956
+#define CSEM_PORT1_START 956
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10},
@@ -1177,43 +1140,43 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30},
-#define CSEM_PORT1_END 1005
-#define CSEM_FUNC0_START 1005
+#define CSEM_PORT1_END 968
+#define CSEM_FUNC0_START 968
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2},
-#define CSEM_FUNC0_END 1007
-#define CSEM_FUNC1_START 1007
+#define CSEM_FUNC0_END 970
+#define CSEM_FUNC1_START 970
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2},
-#define CSEM_FUNC1_END 1009
-#define CSEM_FUNC2_START 1009
+#define CSEM_FUNC1_END 972
+#define CSEM_FUNC2_START 972
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2},
-#define CSEM_FUNC2_END 1011
-#define CSEM_FUNC3_START 1011
+#define CSEM_FUNC2_END 974
+#define CSEM_FUNC3_START 974
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2},
-#define CSEM_FUNC3_END 1013
-#define CSEM_FUNC4_START 1013
+#define CSEM_FUNC3_END 976
+#define CSEM_FUNC4_START 976
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2},
-#define CSEM_FUNC4_END 1015
-#define CSEM_FUNC5_START 1015
+#define CSEM_FUNC4_END 978
+#define CSEM_FUNC5_START 978
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2},
-#define CSEM_FUNC5_END 1017
-#define CSEM_FUNC6_START 1017
+#define CSEM_FUNC5_END 980
+#define CSEM_FUNC6_START 980
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2},
-#define CSEM_FUNC6_END 1019
-#define CSEM_FUNC7_START 1019
+#define CSEM_FUNC6_END 982
+#define CSEM_FUNC7_START 982
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2},
-#define CSEM_FUNC7_END 1021
-#define XPB_COMMON_START 1021
+#define CSEM_FUNC7_END 984
+#define XPB_COMMON_START 984
{OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20},
-#define XPB_COMMON_END 1022
-#define DQ_COMMON_START 1022
+#define XPB_COMMON_END 985
+#define DQ_COMMON_START 985
{OP_WR, DORQ_REG_MODE_ACT, 0x2},
{OP_WR, DORQ_REG_NORM_CID_OFST, 0x3},
{OP_WR, DORQ_REG_OUTST_REQ, 0x4},
@@ -1232,8 +1195,8 @@ static const struct raw_op init_ops[] = {
{OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c},
{OP_WR, DORQ_REG_REGN, 0x7c1004},
{OP_WR, DORQ_REG_IF_EN, 0xf},
-#define DQ_COMMON_END 1040
-#define TIMERS_COMMON_START 1040
+#define DQ_COMMON_END 1003
+#define TIMERS_COMMON_START 1003
{OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2},
{OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c},
{OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1},
@@ -1256,14 +1219,14 @@ static const struct raw_op init_ops[] = {
{OP_WR, TM_REG_EN_CL0_INPUT, 0x1},
{OP_WR, TM_REG_EN_CL1_INPUT, 0x1},
{OP_WR, TM_REG_EN_CL2_INPUT, 0x1},
-#define TIMERS_COMMON_END 1062
-#define TIMERS_PORT0_START 1062
+#define TIMERS_COMMON_END 1025
+#define TIMERS_PORT0_START 1025
{OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2},
-#define TIMERS_PORT0_END 1063
-#define TIMERS_PORT1_START 1063
+#define TIMERS_PORT0_END 1026
+#define TIMERS_PORT1_START 1026
{OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2},
-#define TIMERS_PORT1_END 1064
-#define XSDM_COMMON_START 1064
+#define TIMERS_PORT1_END 1027
+#define XSDM_COMMON_START 1027
{OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614},
{OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424},
{OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600},
@@ -1311,8 +1274,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8},
{OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1},
{OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa},
-#define XSDM_COMMON_END 1111
-#define QM_COMMON_START 1111
+#define XSDM_COMMON_END 1074
+#define QM_COMMON_START 1074
{OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6},
{OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5},
{OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa},
@@ -1613,8 +1576,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5},
{OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7},
{OP_WR, QM_REG_CMINTEN, 0xff},
-#define QM_COMMON_END 1411
-#define PBF_COMMON_START 1411
+#define QM_COMMON_END 1374
+#define PBF_COMMON_START 1374
{OP_WR, PBF_REG_INIT, 0x1},
{OP_WR, PBF_REG_INIT_P4, 0x1},
{OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1},
@@ -1622,20 +1585,20 @@ static const struct raw_op init_ops[] = {
{OP_WR, PBF_REG_INIT_P4, 0x0},
{OP_WR, PBF_REG_INIT, 0x0},
{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0},
-#define PBF_COMMON_END 1418
-#define PBF_PORT0_START 1418
+#define PBF_COMMON_END 1381
+#define PBF_PORT0_START 1381
{OP_WR, PBF_REG_INIT_P0, 0x1},
{OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1},
{OP_WR, PBF_REG_INIT_P0, 0x0},
{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0},
-#define PBF_PORT0_END 1422
-#define PBF_PORT1_START 1422
+#define PBF_PORT0_END 1385
+#define PBF_PORT1_START 1385
{OP_WR, PBF_REG_INIT_P1, 0x1},
{OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1},
{OP_WR, PBF_REG_INIT_P1, 0x0},
{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0},
-#define PBF_PORT1_END 1426
-#define XCM_COMMON_START 1426
+#define PBF_PORT1_END 1389
+#define XCM_COMMON_START 1389
{OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32},
{OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020},
{OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020},
@@ -1670,7 +1633,7 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f},
{OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20},
{OP_ZR, XCM_REG_XX_TABLE, 0x12},
- {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02ce},
+ {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02cc},
{OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302},
{OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf},
{OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7},
@@ -1700,8 +1663,8 @@ static const struct raw_op init_ops[] = {
{OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1},
{OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1},
{OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1},
-#define XCM_COMMON_END 1490
-#define XCM_PORT0_START 1490
+#define XCM_COMMON_END 1453
+#define XCM_PORT0_START 1453
{OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1710,8 +1673,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2},
{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
-#define XCM_PORT0_END 1498
-#define XCM_PORT1_START 1498
+#define XCM_PORT0_END 1461
+#define XCM_PORT1_START 1461
{OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1720,8 +1683,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2},
{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
-#define XCM_PORT1_END 1506
-#define XCM_FUNC0_START 1506
+#define XCM_PORT1_END 1469
+#define XCM_FUNC0_START 1469
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1731,8 +1694,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC0_END 1515
-#define XCM_FUNC1_START 1515
+#define XCM_FUNC0_END 1478
+#define XCM_FUNC1_START 1478
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1742,8 +1705,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC1_END 1524
-#define XCM_FUNC2_START 1524
+#define XCM_FUNC1_END 1487
+#define XCM_FUNC2_START 1487
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1753,8 +1716,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC2_END 1533
-#define XCM_FUNC3_START 1533
+#define XCM_FUNC2_END 1496
+#define XCM_FUNC3_START 1496
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1764,8 +1727,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC3_END 1542
-#define XCM_FUNC4_START 1542
+#define XCM_FUNC3_END 1505
+#define XCM_FUNC4_START 1505
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1775,8 +1738,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC4_END 1551
-#define XCM_FUNC5_START 1551
+#define XCM_FUNC4_END 1514
+#define XCM_FUNC5_START 1514
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1786,8 +1749,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC5_END 1560
-#define XCM_FUNC6_START 1560
+#define XCM_FUNC5_END 1523
+#define XCM_FUNC6_START 1523
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1797,8 +1760,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC6_END 1569
-#define XCM_FUNC7_START 1569
+#define XCM_FUNC6_END 1532
+#define XCM_FUNC7_START 1532
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1808,8 +1771,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC7_END 1578
-#define XSEM_COMMON_START 1578
+#define XCM_FUNC7_END 1541
+#define XSEM_COMMON_START 1541
{OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0},
{OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0},
{OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1876,9 +1839,9 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202ed},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202eb},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ef},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ed},
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321},
@@ -1886,29 +1849,29 @@ static const struct raw_op init_ops[] = {
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0},
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f3},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f1},
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2},
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f5},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f3},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337},
{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f7},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f5},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339},
{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80307},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80305},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349},
{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030f},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030d},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351},
{OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000},
{OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000},
- {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130317},
+ {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130315},
{OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359},
{OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000},
{OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000},
@@ -1918,10 +1881,10 @@ static const struct raw_op init_ops[] = {
{OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22},
{OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2},
{OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8},
- {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60319},
+ {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60317},
{OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b},
-#define XSEM_COMMON_END 1688
-#define XSEM_PORT0_START 1688
+#define XSEM_COMMON_END 1651
+#define XSEM_PORT0_START 1651
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c},
@@ -1934,7 +1897,7 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x10031b},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x100319},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc},
@@ -1950,12 +1913,12 @@ static const struct raw_op init_ops[] = {
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x2032b},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20329},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4},
-#define XSEM_PORT0_END 1720
-#define XSEM_PORT1_START 1720
+#define XSEM_PORT0_END 1683
+#define XSEM_PORT1_START 1683
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c},
@@ -1968,7 +1931,7 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032d},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032b},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc},
@@ -1984,65 +1947,65 @@ static const struct raw_op init_ops[] = {
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033d},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033b},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4},
-#define XSEM_PORT1_END 1752
-#define XSEM_FUNC0_START 1752
+#define XSEM_PORT1_END 1715
+#define XSEM_FUNC0_START 1715
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe},
-#define XSEM_FUNC0_END 1755
-#define XSEM_FUNC1_START 1755
+#define XSEM_FUNC0_END 1718
+#define XSEM_FUNC1_START 1718
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe},
-#define XSEM_FUNC1_END 1758
-#define XSEM_FUNC2_START 1758
+#define XSEM_FUNC1_END 1721
+#define XSEM_FUNC2_START 1721
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe},
-#define XSEM_FUNC2_END 1761
-#define XSEM_FUNC3_START 1761
+#define XSEM_FUNC2_END 1724
+#define XSEM_FUNC3_START 1724
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe},
-#define XSEM_FUNC3_END 1764
-#define XSEM_FUNC4_START 1764
+#define XSEM_FUNC3_END 1727
+#define XSEM_FUNC4_START 1727
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe},
-#define XSEM_FUNC4_END 1767
-#define XSEM_FUNC5_START 1767
+#define XSEM_FUNC4_END 1730
+#define XSEM_FUNC5_START 1730
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe},
-#define XSEM_FUNC5_END 1770
-#define XSEM_FUNC6_START 1770
+#define XSEM_FUNC5_END 1733
+#define XSEM_FUNC6_START 1733
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe},
-#define XSEM_FUNC6_END 1773
-#define XSEM_FUNC7_START 1773
+#define XSEM_FUNC6_END 1736
+#define XSEM_FUNC7_START 1736
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe},
-#define XSEM_FUNC7_END 1776
-#define CDU_COMMON_START 1776
+#define XSEM_FUNC7_END 1739
+#define CDU_COMMON_START 1739
{OP_WR, CDU_REG_CDU_CONTROL0, 0x1},
{OP_WR_E1H, CDU_REG_MF_MODE, 0x1},
{OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000},
{OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d},
- {OP_WB_E1, CDU_REG_L1TT, 0x200033f},
+ {OP_WB_E1, CDU_REG_L1TT, 0x200033d},
{OP_WB_E1H, CDU_REG_L1TT, 0x20003e1},
- {OP_WB_E1, CDU_REG_MATT, 0x20053f},
+ {OP_WB_E1, CDU_REG_MATT, 0x20053d},
{OP_WB_E1H, CDU_REG_MATT, 0x2805e1},
{OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2},
- {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055f},
+ {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055d},
{OP_ZR, CDU_REG_MATT + 0xa0, 0x18},
-#define CDU_COMMON_END 1787
-#define DMAE_COMMON_START 1787
+#define CDU_COMMON_END 1750
+#define DMAE_COMMON_START 1750
{OP_ZR, DMAE_REG_CMD_MEM, 0xe0},
{OP_WR, DMAE_REG_CRC16C_INIT, 0x0},
{OP_WR, DMAE_REG_CRC16T10_INIT, 0x1},
@@ -2050,24 +2013,24 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2},
{OP_WR, DMAE_REG_PCI_IFEN, 0x1},
{OP_WR, DMAE_REG_GRC_IFEN, 0x1},
-#define DMAE_COMMON_END 1794
-#define PXP_COMMON_START 1794
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50565},
+#define DMAE_COMMON_END 1757
+#define PXP_COMMON_START 1757
+ {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50563},
{OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609},
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x5056a},
+ {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50568},
{OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e},
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056f},
-#define PXP_COMMON_END 1799
-#define CFC_COMMON_START 1799
+ {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056d},
+#define PXP_COMMON_END 1762
+#define CFC_COMMON_START 1762
{OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100},
{OP_WR, CFC_REG_CONTROL0, 0x10},
{OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff},
{OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a},
-#define CFC_COMMON_END 1803
-#define HC_COMMON_START 1803
+#define CFC_COMMON_END 1766
+#define HC_COMMON_START 1766
{OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4},
-#define HC_COMMON_END 1804
-#define HC_PORT0_START 1804
+#define HC_COMMON_END 1767
+#define HC_PORT0_START 1767
{OP_WR_E1, HC_REG_CONFIG_0, 0x1080},
{OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2},
{OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2086,8 +2049,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_PORT0_END 1822
-#define HC_PORT1_START 1822
+#define HC_PORT0_END 1785
+#define HC_PORT1_START 1785
{OP_WR_E1, HC_REG_CONFIG_1, 0x1080},
{OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2},
{OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2106,8 +2069,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_PORT1_END 1840
-#define HC_FUNC0_START 1840
+#define HC_PORT1_END 1803
+#define HC_FUNC0_START 1803
{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0},
{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2123,8 +2086,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC0_END 1855
-#define HC_FUNC1_START 1855
+#define HC_FUNC0_END 1818
+#define HC_FUNC1_START 1818
{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1},
{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2140,8 +2103,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC1_END 1870
-#define HC_FUNC2_START 1870
+#define HC_FUNC1_END 1833
+#define HC_FUNC2_START 1833
{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2},
{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2157,8 +2120,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC2_END 1885
-#define HC_FUNC3_START 1885
+#define HC_FUNC2_END 1848
+#define HC_FUNC3_START 1848
{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3},
{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2174,8 +2137,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC3_END 1900
-#define HC_FUNC4_START 1900
+#define HC_FUNC3_END 1863
+#define HC_FUNC4_START 1863
{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4},
{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2191,8 +2154,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC4_END 1915
-#define HC_FUNC5_START 1915
+#define HC_FUNC4_END 1878
+#define HC_FUNC5_START 1878
{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5},
{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2208,8 +2171,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC5_END 1930
-#define HC_FUNC6_START 1930
+#define HC_FUNC5_END 1893
+#define HC_FUNC6_START 1893
{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6},
{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2225,8 +2188,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC6_END 1945
-#define HC_FUNC7_START 1945
+#define HC_FUNC6_END 1908
+#define HC_FUNC7_START 1908
{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7},
{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2242,8 +2205,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC7_END 1960
-#define PXP2_COMMON_START 1960
+#define HC_FUNC7_END 1923
+#define PXP2_COMMON_START 1923
{OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340},
{OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1},
{OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10},
@@ -2361,8 +2324,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1},
{OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1},
{OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340},
-#define PXP2_COMMON_END 2077
-#define MISC_AEU_COMMON_START 2077
+#define PXP2_COMMON_END 2040
+#define MISC_AEU_COMMON_START 2040
{OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16},
{OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000},
{OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555},
@@ -2382,8 +2345,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0},
{OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00},
{OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3},
-#define MISC_AEU_COMMON_END 2096
-#define MISC_AEU_PORT0_START 2096
+#define MISC_AEU_COMMON_END 2059
+#define MISC_AEU_PORT0_START 2059
{OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000},
{OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000},
{OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef},
@@ -2416,8 +2379,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0},
{OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3},
{OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7},
-#define MISC_AEU_PORT0_END 2128
-#define MISC_AEU_PORT1_START 2128
+#define MISC_AEU_PORT0_END 2091
+#define MISC_AEU_PORT1_START 2091
{OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000},
{OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000},
{OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef},
@@ -2450,7 +2413,7 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0},
{OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3},
{OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7},
-#define MISC_AEU_PORT1_END 2160
+#define MISC_AEU_PORT1_END 2123
};
@@ -2560,103 +2523,92 @@ static const u32 init_data_e1[] = {
0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80,
0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280,
0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780,
- 0x000ddb00, 0x00001900, 0x00000028, 0x00000000, 0x00100000, 0x00000000,
- 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+ 0x000ddb00, 0x00001900, 0x00100000, 0x00000000, 0x00000000, 0xffffffff,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
- 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x00001000, 0x00002080,
- 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380,
- 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680,
- 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980,
- 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80,
- 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000,
- 0x00010001, 0x00000604, 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201,
- 0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
+ 0x00000000, 0x00003500, 0x00001000, 0x00002080, 0x00003100, 0x00004180,
+ 0x00005200, 0x00006280, 0x00007300, 0x00008380, 0x00009400, 0x0000a480,
+ 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 0x0000f700, 0x00010780,
+ 0x00011800, 0x00012880, 0x00013900, 0x00014980, 0x00015a00, 0x00016a80,
+ 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 0x0001bd00, 0x0001cd80,
+ 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 0x00010001, 0x00000604,
+ 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 0xcccccccc, 0x00000000,
+ 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000,
- 0x00007ff8, 0x00000000, 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff,
+ 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000,
+ 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
+ 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x00100000, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
- 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
- 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
- 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
- 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
- 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
- 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
- 0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
+ 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
+ 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
- 0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c,
+ 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
- 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
+ 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
- 0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
- 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7,
+ 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c,
+ 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
- 0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
- 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c,
+ 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
- 0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c,
+ 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
- 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
- 0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
- 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a,
+ 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c,
+ 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
+ 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
+ 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
+ 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
+ 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
+ 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
+ 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
+ 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff,
0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c,
@@ -2678,16 +2630,27 @@ static const u32 init_data_e1[] = {
0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c,
0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000,
- 0x00070100, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280,
- 0x00010370, 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0,
- 0x00010200, 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170,
- 0x000b8198, 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000,
- 0x00080100, 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298,
- 0x00080380, 0x00028000, 0x000b8028, 0x000200e0, 0x00010100, 0x00008110,
- 0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000,
- 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc,
- 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000
+ 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
+ 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
+ 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+ 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
+ 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
+ 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+ 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
+ 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
+ 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+ 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170,
+ 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370, 0x00080000,
+ 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200, 0x00070210,
+ 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198, 0x00020250,
+ 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100, 0x00028180,
+ 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380, 0x00028000,
+ 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 0x00000118, 0xcccccccc,
+ 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc,
+ 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc,
+ 0xcccccccc, 0x00002000
};
static const u32 init_data_e1h[] = {
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff2743db10d9..8b92c6ad0759 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -31,17 +31,16 @@
/********************************************************/
#define SUPPORT_CL73 0 /* Currently no */
-#define ETH_HLEN 14
+#define ETH_HLEN 14
#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
#define BMAC_CONTROL_RX_ENABLE 2
-#define MAX_MTU_SIZE 5000
/***********************************************************/
-/* Shortcut definitions */
+/* Shortcut definitions */
/***********************************************************/
#define NIG_STATUS_XGXS0_LINK10G \
@@ -80,12 +79,12 @@
#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
-#define AUTONEG_PARALLEL \
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL \
SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
-#define AUTONEG_SGMII_FIBER_AUTODET \
+#define AUTONEG_SGMII_FIBER_AUTODET \
SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
-#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
@@ -202,11 +201,10 @@ static void bnx2x_emac_init(struct link_params *params,
/* init emac - use read-modify-write */
/* self clear reset */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
timeout = 200;
- do
- {
+ do {
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
if (!timeout) {
@@ -214,18 +212,18 @@ static void bnx2x_emac_init(struct link_params *params,
return;
}
timeout--;
- }while (val & EMAC_MODE_RESET);
+ } while (val & EMAC_MODE_RESET);
/* Set mac address */
val = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
val = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
params->mac_addr[5]);
- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
}
static u8 bnx2x_emac_enable(struct link_params *params,
@@ -286,7 +284,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
if (CHIP_REV_IS_SLOW(bp)) {
/* config GMII mode */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(EMAC_REG_EMAC_MODE,
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE,
(val | EMAC_MODE_PORT_GMII));
} else { /* ASIC */
/* pause enable/disable */
@@ -298,17 +296,19 @@ static u8 bnx2x_emac_enable(struct link_params *params,
EMAC_RX_MODE_FLOW_EN);
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_EXT_PAUSE_EN);
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
if (vars->flow_ctrl & FLOW_CTRL_TX)
bnx2x_bits_en(bp, emac_base +
EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_EXT_PAUSE_EN);
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
}
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
- EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
/* Set Loopback */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
@@ -316,10 +316,10 @@ static u8 bnx2x_emac_enable(struct link_params *params,
val |= 0x810;
else
val &= ~0x810;
- EMAC_WR(EMAC_REG_EMAC_MODE, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
/* enable emac for jumbo packets */
- EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
@@ -591,9 +591,9 @@ void bnx2x_link_status_update(struct link_params *params,
vars->flow_ctrl &= ~FLOW_CTRL_RX;
if (vars->phy_flags & PHY_XGXS_FLAG) {
- if (params->req_line_speed &&
- ((params->req_line_speed == SPEED_10) ||
- (params->req_line_speed == SPEED_100))) {
+ if (vars->line_speed &&
+ ((vars->line_speed == SPEED_10) ||
+ (vars->line_speed == SPEED_100))) {
vars->phy_flags |= PHY_SGMII_FLAG;
} else {
vars->phy_flags &= ~PHY_SGMII_FLAG;
@@ -645,7 +645,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
- u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+ u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
/* Only if the bmac is out of reset */
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -670,7 +670,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
u8 port = params->port;
u32 init_crd, crd;
u32 count = 1000;
- u32 pause = 0;
/* disable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
@@ -693,33 +692,25 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return -EINVAL;
}
- if (flow_ctrl & FLOW_CTRL_RX)
- pause = 1;
- REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
- if (pause) {
+ if (flow_ctrl & FLOW_CTRL_RX ||
+ line_speed == SPEED_10 ||
+ line_speed == SPEED_100 ||
+ line_speed == SPEED_1000 ||
+ line_speed == SPEED_2500) {
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
/* update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
/* update init credit */
- init_crd = 778; /* (800-18-4) */
+ init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVREHEAD)/16;
-
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
/* update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
/* update init credit */
switch (line_speed) {
- case SPEED_10:
- case SPEED_100:
- case SPEED_1000:
- init_crd = thresh + 55 - 22;
- break;
-
- case SPEED_2500:
- init_crd = thresh + 138 - 22;
- break;
-
case SPEED_10000:
init_crd = thresh + 553 - 22;
break;
@@ -764,10 +755,10 @@ static u32 bnx2x_get_emac_base(u32 ext_phy_type, u8 port)
emac_base = GRCBASE_EMAC0;
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- emac_base = (port) ? GRCBASE_EMAC0: GRCBASE_EMAC1;
+ emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
break;
default:
- emac_base = (port) ? GRCBASE_EMAC1: GRCBASE_EMAC0;
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
break;
}
return emac_base;
@@ -1044,7 +1035,7 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
}
static void bnx2x_set_parallel_detection(struct link_params *params,
- u8 phy_flags)
+ u8 phy_flags)
{
struct bnx2x *bp = params->bp;
u16 control2;
@@ -1114,7 +1105,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
/* CL37 Autoneg Enabled */
- if (params->req_line_speed == SPEED_AUTO_NEG)
+ if (vars->line_speed == SPEED_AUTO_NEG)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
else /* CL37 Autoneg Disabled */
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
@@ -1132,7 +1123,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
- if (params->req_line_speed == SPEED_AUTO_NEG)
+ if (vars->line_speed == SPEED_AUTO_NEG)
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
@@ -1148,7 +1139,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
MDIO_REG_BANK_BAM_NEXT_PAGE,
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
- if (params->req_line_speed == SPEED_AUTO_NEG) {
+ if (vars->line_speed == SPEED_AUTO_NEG) {
/* Enable BAM aneg Mode and TetonII aneg Mode */
reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
@@ -1164,7 +1155,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
reg_val);
/* Enable Clause 73 Aneg */
- if ((params->req_line_speed == SPEED_AUTO_NEG) &&
+ if ((vars->line_speed == SPEED_AUTO_NEG) &&
(SUPPORT_CL73)) {
/* Enable BAM Station Manager */
@@ -1226,7 +1217,8 @@ static void bnx2x_set_autoneg(struct link_params *params,
}
/* program SerDes, forced speed */
-static void bnx2x_program_serdes(struct link_params *params)
+static void bnx2x_program_serdes(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
@@ -1248,28 +1240,35 @@ static void bnx2x_program_serdes(struct link_params *params)
/* program speed
- needed only if the speed is greater than 1G (2.5G or 10G) */
- if (!((params->req_line_speed == SPEED_1000) ||
- (params->req_line_speed == SPEED_100) ||
- (params->req_line_speed == SPEED_10))) {
- CL45_RD_OVER_CL22(bp, params->port,
+ CL45_RD_OVER_CL22(bp, params->port,
params->phy_addr,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
- /* clearing the speed value before setting the right speed */
- reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
+ /* clearing the speed value before setting the right speed */
+ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
+
+ reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+ if (!((vars->line_speed == SPEED_1000) ||
+ (vars->line_speed == SPEED_100) ||
+ (vars->line_speed == SPEED_10))) {
+
reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
- if (params->req_line_speed == SPEED_10000)
+ if (vars->line_speed == SPEED_10000)
reg_val |=
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
- if (params->req_line_speed == SPEED_13000)
+ if (vars->line_speed == SPEED_13000)
reg_val |=
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
- CL45_WR_OVER_CL22(bp, params->port,
+ }
+
+ CL45_WR_OVER_CL22(bp, params->port,
params->phy_addr,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, reg_val);
- }
+
}
static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
@@ -1295,48 +1294,49 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
MDIO_OVER_1G_UP3, 0);
}
-static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
- u32 *ieee_fc)
+static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
{
- struct bnx2x *bp = params->bp;
- /* for AN, we are always publishing full duplex */
- u16 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-
+ *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
/* resolve pause mode and advertisement
* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
switch (params->req_flow_ctrl) {
case FLOW_CTRL_AUTO:
- if (params->mtu <= MAX_MTU_SIZE) {
- an_adv |=
+ if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) {
+ *ieee_fc |=
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
} else {
- an_adv |=
+ *ieee_fc |=
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
}
break;
case FLOW_CTRL_TX:
- an_adv |=
+ *ieee_fc |=
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case FLOW_CTRL_RX:
case FLOW_CTRL_BOTH:
- an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
break;
case FLOW_CTRL_NONE:
default:
- an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
break;
}
+}
- *ieee_fc = an_adv;
+static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
+ u32 ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ /* for AN, we are always publishing full duplex */
CL45_WR_OVER_CL22(bp, params->port,
params->phy_addr,
MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc);
}
static void bnx2x_restart_autoneg(struct link_params *params)
@@ -1382,7 +1382,8 @@ static void bnx2x_restart_autoneg(struct link_params *params)
}
}
-static void bnx2x_initialize_sgmii_process(struct link_params *params)
+static void bnx2x_initialize_sgmii_process(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 control1;
@@ -1406,7 +1407,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
control1);
/* if forced speed */
- if (!(params->req_line_speed == SPEED_AUTO_NEG)) {
+ if (!(vars->line_speed == SPEED_AUTO_NEG)) {
/* set speed, disable autoneg */
u16 mii_control;
@@ -1419,7 +1420,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
- switch (params->req_line_speed) {
+ switch (vars->line_speed) {
case SPEED_100:
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
@@ -1433,8 +1434,8 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
break;
default:
/* invalid speed for SGMII */
- DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
- params->req_line_speed);
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
break;
}
@@ -1460,20 +1461,20 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
*/
static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
-{
- switch (pause_result) { /* ASYM P ASYM P */
- case 0xb: /* 1 0 1 1 */
+{ /* LD LP */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
vars->flow_ctrl = FLOW_CTRL_TX;
break;
- case 0xe: /* 1 1 1 0 */
+ case 0xe: /* 1 1 1 0 */
vars->flow_ctrl = FLOW_CTRL_RX;
break;
- case 0x5: /* 0 1 0 1 */
- case 0x7: /* 0 1 1 1 */
- case 0xd: /* 1 1 0 1 */
- case 0xf: /* 1 1 1 1 */
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
vars->flow_ctrl = FLOW_CTRL_BOTH;
break;
@@ -1531,6 +1532,28 @@ static u8 bnx2x_ext_phy_resove_fc(struct link_params *params,
DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
pause_result);
bnx2x_pause_resolve(vars, pause_result);
+ if (vars->flow_ctrl == FLOW_CTRL_NONE &&
+ ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
+ bnx2x_cl45_read(bp, port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+ bnx2x_cl45_read(bp, port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
+ bnx2x_pause_resolve(vars, pause_result);
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
+ pause_result);
+ }
}
return ret;
}
@@ -1541,8 +1564,8 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
u32 gp_status)
{
struct bnx2x *bp = params->bp;
- u16 ld_pause; /* local driver */
- u16 lp_pause; /* link partner */
+ u16 ld_pause; /* local driver */
+ u16 lp_pause; /* link partner */
u16 pause_result;
vars->flow_ctrl = FLOW_CTRL_NONE;
@@ -1573,13 +1596,10 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
(bnx2x_ext_phy_resove_fc(params, vars))) {
return;
} else {
- vars->flow_ctrl = params->req_flow_ctrl;
- if (vars->flow_ctrl == FLOW_CTRL_AUTO) {
- if (params->mtu <= MAX_MTU_SIZE)
- vars->flow_ctrl = FLOW_CTRL_BOTH;
- else
- vars->flow_ctrl = FLOW_CTRL_TX;
- }
+ if (params->req_flow_ctrl == FLOW_CTRL_AUTO)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else
+ vars->flow_ctrl = params->req_flow_ctrl;
}
DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
}
@@ -1590,6 +1610,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
u32 gp_status)
{
struct bnx2x *bp = params->bp;
+
u8 rc = 0;
vars->link_status = 0;
@@ -1690,7 +1711,11 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
vars->link_status |= LINK_STATUS_SERDES_LINK;
- if (params->req_line_speed == SPEED_AUTO_NEG) {
+ if ((params->req_line_speed == SPEED_AUTO_NEG) &&
+ ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
+ (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) {
vars->autoneg = AUTO_NEG_ENABLED;
if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
@@ -1705,18 +1730,18 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
}
if (vars->flow_ctrl & FLOW_CTRL_TX)
- vars->link_status |=
- LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+ vars->link_status |=
+ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
if (vars->flow_ctrl & FLOW_CTRL_RX)
- vars->link_status |=
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+ vars->link_status |=
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
} else { /* link_down */
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
- vars->line_speed = 0;
+
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = FLOW_CTRL_NONE;
vars->autoneg = AUTO_NEG_DISABLED;
@@ -1817,15 +1842,15 @@ static u8 bnx2x_emac_program(struct link_params *params,
}
/*****************************************************************************/
-/* External Phy section */
+/* External Phy section */
/*****************************************************************************/
-static void bnx2x_hw_reset(struct bnx2x *bp)
+static void bnx2x_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
msleep(1);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
static void bnx2x_ext_phy_reset(struct link_params *params,
@@ -1854,10 +1879,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
/* HW reset */
- bnx2x_hw_reset(bp);
+ bnx2x_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
@@ -1869,7 +1895,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
/* Unset Low Power Mode and SW reset */
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
DP(NETIF_MSG_LINK, "XGXS 8072\n");
bnx2x_cl45_write(bp, params->port,
@@ -1887,19 +1914,14 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
DP(NETIF_MSG_LINK, "XGXS 8073\n");
- bnx2x_cl45_write(bp,
- params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
}
break;
@@ -1908,10 +1930,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
/* HW reset */
- bnx2x_hw_reset(bp);
+ bnx2x_hw_reset(bp, params->port);
break;
@@ -1934,7 +1957,7 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
DP(NETIF_MSG_LINK, "SerDes 5482\n");
- bnx2x_hw_reset(bp);
+ bnx2x_hw_reset(bp, params->port);
break;
default:
@@ -2098,42 +2121,45 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
}
-static void bnx2x_bcm8073_external_rom_boot(struct link_params *params)
+static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
+ u8 ext_phy_addr)
{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = ((params->ext_phy_config &
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- u16 fw_ver1, fw_ver2, val;
- /* Need to wait 100ms after reset */
- msleep(100);
- /* Boot port from external ROM */
+ u16 fw_ver1, fw_ver2;
+ /* Boot port from external ROM */
/* EDC grst */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
0x0001);
/* ucode reboot and rst */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
0x008c);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
/* Release srst bit */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
@@ -2142,35 +2168,52 @@ static void bnx2x_bcm8073_external_rom_boot(struct link_params *params)
msleep(100);
/* Clear ser_boot_ctl bit */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_MISC_CTRL1, 0x0000);
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
- /* Only set bit 10 = 1 (Tx power down) */
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+}
+static void bnx2x_bcm807x_force_10G(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u8 ext_phy_addr = ((params->ext_phy_config &
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+
+ /* Force KR or KX */
bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val | 1<<10));
-
- msleep(600);
- /* Release bit 10 (Release Tx power down) */
+ MDIO_PMA_REG_CTRL,
+ 0x2040);
bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
-
+ MDIO_PMA_REG_10G_CTRL2,
+ 0x000b);
+ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_BCM_CTRL,
+ 0x0000);
+ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CTRL,
+ 0x0000);
}
-
static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
{
struct bnx2x *bp = params->bp;
@@ -2236,32 +2279,51 @@ static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
}
-static void bnx2x_bcm807x_force_10G(struct link_params *params)
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+ struct link_vars *vars)
{
+
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ u16 cl37_val;
u8 ext_phy_addr = ((params->ext_phy_config &
PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* Force KR or KX */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 0x2040);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2,
- 0x000b);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_BCM_CTRL,
- 0x0000);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ }
+ DP(NETIF_MSG_LINK,
+ "Ext phy AN advertize cl37 0x%x\n", cl37_val);
+
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL,
- 0x0000);
+ MDIO_AN_REG_CL37_FC_LD, cl37_val);
+ msleep(500);
}
static void bnx2x_ext_phy_set_pause(struct link_params *params,
@@ -2282,13 +2344,16 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
- if (vars->ieee_fc &
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
- if (vars->ieee_fc &
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
val |=
MDIO_AN_REG_ADV_PAUSE_PAUSE;
@@ -2302,6 +2367,65 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
MDIO_AN_REG_ADV_PAUSE, val);
}
+
+static void bnx2x_init_internal_phy(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+ u16 bank, rx_eq;
+
+ rx_eq = ((params->serdes_config &
+ PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
+ PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
+
+ DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
+ for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
+ bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
+ CL45_WR_OVER_CL22(bp, port,
+ params->phy_addr,
+ bank ,
+ MDIO_RX0_RX_EQ_BOOST,
+ ((rx_eq &
+ MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
+ MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
+ }
+
+ /* forced speed requested? */
+ if (vars->line_speed != SPEED_AUTO_NEG) {
+ DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+ /* disable autoneg */
+ bnx2x_set_autoneg(params, vars);
+
+ /* program speed and duplex */
+ bnx2x_program_serdes(params, vars);
+
+ } else { /* AN_mode */
+ DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+
+ /* AN enabled */
+ bnx2x_set_brcm_cl37_advertisment(params);
+
+ /* program duplex & pause advertisement (for aneg) */
+ bnx2x_set_ieee_aneg_advertisment(params,
+ vars->ieee_fc);
+
+ /* enable autoneg */
+ bnx2x_set_autoneg(params, vars);
+
+ /* enable and restart AN */
+ bnx2x_restart_autoneg(params);
+ }
+
+ } else { /* SGMII mode */
+ DP(NETIF_MSG_LINK, "SGMII\n");
+
+ bnx2x_initialize_sgmii_process(params, vars);
+ }
+}
+
static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
@@ -2343,7 +2467,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
switch (ext_phy_type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "XGXS Direct\n");
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
@@ -2419,7 +2542,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FD,
+ MDIO_AN_REG_CL37_FC_LP,
0x0020);
/* Enable CL37 AN */
bnx2x_cl45_write(bp, params->port,
@@ -2458,54 +2581,43 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
rx_alarm_ctrl_val = 0x400;
lasi_ctrl_val = 0x0004;
} else {
- /* In 8073, port1 is directed through emac0 and
- * port0 is directed through emac1
- */
rx_alarm_ctrl_val = (1<<2);
- /*lasi_ctrl_val = 0x0005;*/
lasi_ctrl_val = 0x0004;
}
- /* Wait for soft reset to get cleared upto 1 sec */
- for (cnt = 0; cnt < 1000; cnt++) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- &ctrl);
- if (!(ctrl & (1<<15)))
- break;
- msleep(1);
- }
- DP(NETIF_MSG_LINK,
- "807x control reg 0x%x (after %d ms)\n",
- ctrl, cnt);
+ /* enable LASI */
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM_CTRL,
+ rx_alarm_ctrl_val);
+
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LASI_CTRL,
+ lasi_ctrl_val);
+
+ bnx2x_8073_set_pause_cl37(params, vars);
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){
bnx2x_bcm8072_external_rom_boot(params);
} else {
- bnx2x_bcm8073_external_rom_boot(params);
+
/* In case of 8073 with long xaui lines,
don't set the 8073 xaui low power*/
bnx2x_bcm8073_set_xaui_low_power_mode(params);
}
- /* enable LASI */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- rx_alarm_ctrl_val);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL,
- lasi_ctrl_val);
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ 0xca13,
+ &tmp1);
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
@@ -2519,12 +2631,21 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
/* If this is forced speed, set to KR or KX
* (all other are not supported)
*/
- if (!(params->req_line_speed == SPEED_AUTO_NEG)) {
- if (params->req_line_speed == SPEED_10000) {
- bnx2x_bcm807x_force_10G(params);
- DP(NETIF_MSG_LINK,
- "Forced speed 10G on 807X\n");
- break;
+ if (params->loopback_mode == LOOPBACK_EXT) {
+ bnx2x_bcm807x_force_10G(params);
+ DP(NETIF_MSG_LINK,
+ "Forced speed 10G on 807X\n");
+ break;
+ } else {
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type, ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_BCM_CTRL,
+ 0x0002);
+ }
+ if (params->req_line_speed != SPEED_AUTO_NEG) {
+ if (params->req_line_speed == SPEED_10000) {
+ val = (1<<7);
} else if (params->req_line_speed ==
SPEED_2500) {
val = (1<<5);
@@ -2539,11 +2660,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= (1<<7);
+ /* Note that 2.5G works only when
+ used with 1G advertisement */
if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
val |= (1<<5);
- DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
- /*val = ((1<<5)|(1<<7));*/
+ DP(NETIF_MSG_LINK,
+ "807x autoneg val = 0x%x\n", val);
}
bnx2x_cl45_write(bp, params->port,
@@ -2554,20 +2678,19 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- /* Disable 2.5Ghz */
+
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
0x8329, &tmp1);
-/* SUPPORT_SPEED_CAPABILITY
- (Due to the nature of the link order, its not
- possible to enable 2.5G within the autoneg
- capabilities)
- if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
-*/
- if (params->req_line_speed == SPEED_2500) {
+
+ if (((params->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+ (params->req_line_speed ==
+ SPEED_AUTO_NEG)) ||
+ (params->req_line_speed ==
+ SPEED_2500)) {
u16 phy_ver;
/* Allow 2.5G for A1 and above */
bnx2x_cl45_read(bp, params->port,
@@ -2575,49 +2698,53 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
ext_phy_addr,
MDIO_PMA_DEVAD,
0xc801, &phy_ver);
-
+ DP(NETIF_MSG_LINK, "Add 2.5G\n");
if (phy_ver > 0)
tmp1 |= 1;
else
tmp1 &= 0xfffe;
- }
- else
+ } else {
+ DP(NETIF_MSG_LINK, "Disable 2.5G\n");
tmp1 &= 0xfffe;
+ }
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
0x8329, tmp1);
}
- /* Add support for CL37 (passive mode) I */
- bnx2x_cl45_write(bp, params->port,
+
+ /* Add support for CL37 (passive mode) II */
+
+ bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_CL73, 0x040c);
- /* Add support for CL37 (passive mode) II */
+ MDIO_AN_REG_CL37_FC_LD,
+ &tmp1);
+
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FD, 0x20);
+ MDIO_AN_REG_CL37_FC_LD, (tmp1 |
+ ((params->req_duplex == DUPLEX_FULL) ?
+ 0x20 : 0x40)));
+
/* Add support for CL37 (passive mode) III */
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
MDIO_AN_REG_CL37_AN, 0x1000);
- /* Restart autoneg */
- msleep(500);
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-
- /* The SNR will improve about 2db by changing the
+ /* The SNR will improve about 2db by changing
BW and FEE main tap. Rest commands are executed
after link is up*/
- /* Change FFE main cursor to 5 in EDC register */
+ /*Change FFE main cursor to 5 in EDC register*/
if (bnx2x_8073_is_snr_needed(params))
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
@@ -2626,25 +2753,28 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
MDIO_PMA_REG_EDC_FFE_MAIN,
0xFB0C);
- /* Enable FEC (Forware Error Correction)
- Request in the AN */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV2, &tmp1);
+ /* Enable FEC (Forward Error Correction
+ Request in the AN */
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV2, &tmp1);
- tmp1 |= (1<<15);
+ tmp1 |= (1<<15);
+
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV2, tmp1);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV2, tmp1);
}
bnx2x_ext_phy_set_pause(params, vars);
+ /* Restart autoneg */
+ msleep(500);
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
ext_phy_addr,
@@ -2701,10 +2831,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
}
} else { /* SerDes */
-/* ext_phy_addr = ((bp->ext_phy_config &
- PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
- PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
-*/
+
ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
switch (ext_phy_type) {
case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
@@ -2726,7 +2853,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u32 ext_phy_type;
@@ -2767,6 +2894,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_PMA_REG_RX_SD, &rx_sd);
DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
ext_phy_link_up = (rx_sd & 0x1);
+ if (ext_phy_link_up)
+ vars->line_speed = SPEED_10000;
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
@@ -2810,6 +2939,13 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
*/
ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
(val2 & (1<<1)));
+ if (ext_phy_link_up) {
+ if (val2 & (1<<1))
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+ }
+
/* clear LASI indication*/
bnx2x_cl45_read(bp, params->port, ext_phy_type,
ext_phy_addr,
@@ -2820,6 +2956,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
{
+ u16 link_status = 0;
+ u16 an1000_status = 0;
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
bnx2x_cl45_read(bp, params->port,
@@ -2846,14 +2984,9 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_LASI_STATUS, &val1);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val2);
DP(NETIF_MSG_LINK,
- "8703 LASI status 0x%x->0x%x\n",
- val1, val2);
+ "8703 LASI status 0x%x\n",
+ val1);
}
/* clear the interrupt LASI status register */
@@ -2869,20 +3002,23 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_PCS_REG_STATUS, &val1);
DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
val2, val1);
- /* Check the LASI */
+ /* Clear MSG-OUT */
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &val2);
+ 0xca13,
+ &val1);
+
+ /* Check the LASI */
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM,
- &val1);
- DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
- val2, val1);
+ MDIO_PMA_REG_RX_ALARM, &val2);
+
+ DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
/* Check the link status */
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
@@ -2905,29 +3041,29 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- u16 an1000_status = 0;
+
if (ext_phy_link_up &&
- (
- (params->req_line_speed != SPEED_10000)
- )) {
+ ((params->req_line_speed !=
+ SPEED_10000))) {
if (bnx2x_bcm8073_xaui_wa(params)
!= 0) {
ext_phy_link_up = 0;
break;
}
- bnx2x_cl45_read(bp, params->port,
+ }
+ bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
- MDIO_XS_DEVAD,
+ MDIO_AN_DEVAD,
0x8304,
&an1000_status);
- bnx2x_cl45_read(bp, params->port,
+ bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
- MDIO_XS_DEVAD,
+ MDIO_AN_DEVAD,
0x8304,
&an1000_status);
- }
+
/* Check the link status on 1.1.2 */
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
@@ -2943,8 +3079,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
"an_link_status=0x%x\n",
val2, val1, an1000_status);
- ext_phy_link_up = (((val1 & 4) == 4) ||
- (an1000_status & (1<<1)));
+ ext_phy_link_up = (((val1 & 4) == 4) ||
+ (an1000_status & (1<<1)));
if (ext_phy_link_up &&
bnx2x_8073_is_snr_needed(params)) {
/* The SNR will improve about 2dbby
@@ -2968,8 +3104,74 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_PMA_REG_CDR_BANDWIDTH,
0x0333);
+
+ }
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ 0xc820,
+ &link_status);
+
+ /* Bits 0..2 --> speed detected,
+ bits 13..15--> link is down */
+ if ((link_status & (1<<2)) &&
+ (!(link_status & (1<<15)))) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 10G\n", params->port);
+ } else if ((link_status & (1<<1)) &&
+ (!(link_status & (1<<14)))) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_2500;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 2.5G\n", params->port);
+ } else if ((link_status & (1<<0)) &&
+ (!(link_status & (1<<13)))) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 1G\n", params->port);
+ } else {
+ ext_phy_link_up = 0;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " is down\n", params->port);
+ }
+ } else {
+ /* See if 1G link is up for the 8072 */
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ 0x8304,
+ &an1000_status);
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ 0x8304,
+ &an1000_status);
+ if (an1000_status & (1<<1)) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 1G\n", params->port);
+ } else if (ext_phy_link_up) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 10G\n", params->port);
}
}
+
+
break;
}
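Editor's note: the new 8073 branch above reads a 0xc820 status word and derives both link state and speed from it; per the in-code comment, bits 0..2 report the detected speed and bits 13..15 report that the corresponding link is down. A self-contained sketch of that decode, using only the bit layout stated in the hunk:

#include <stdint.h>
#include <stdio.h>

#define SPEED_1000	1000
#define SPEED_2500	2500
#define SPEED_10000	10000

/* bits 0..2: speed detected, bits 13..15: that link is down
   (layout taken from the hunk's comment, not from a datasheet) */
static int decode_8073_link_status(uint16_t link_status, int *speed)
{
	if ((link_status & (1 << 2)) && !(link_status & (1 << 15))) {
		*speed = SPEED_10000;
		return 1;
	}
	if ((link_status & (1 << 1)) && !(link_status & (1 << 14))) {
		*speed = SPEED_2500;
		return 1;
	}
	if ((link_status & (1 << 0)) && !(link_status & (1 << 13))) {
		*speed = SPEED_1000;
		return 1;
	}
	return 0;	/* link down */
}

int main(void)
{
	int speed = 0;

	if (decode_8073_link_status(0x0004, &speed))
		printf("link up at %d Mb/s\n", speed);
	return 0;
}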
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
@@ -3006,6 +3208,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_AN_DEVAD,
MDIO_AN_REG_MASTER_STATUS,
&val2);
+ vars->line_speed = SPEED_10000;
DP(NETIF_MSG_LINK,
"SFX7101 AN status 0x%x->Master=%x\n",
val2,
@@ -3100,7 +3303,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
* link management
*/
static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u16 is_10g)
+ struct link_vars *vars, u8 is_10g)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -3181,7 +3384,8 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
}
-static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
+static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr,
+ u32 ext_phy_type)
{
u32 cnt = 0;
u16 ctrl = 0;
@@ -3192,12 +3396,14 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
/* take ext phy out of reset */
bnx2x_set_gpio(bp,
- MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_HIGH);
+ MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
bnx2x_set_gpio(bp,
- MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_HIGH);
+ MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
/* wait for 5ms */
msleep(5);
@@ -3205,7 +3411,7 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
for (cnt = 0; cnt < 1000; cnt++) {
msleep(1);
bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL,
@@ -3217,13 +3423,17 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
}
}
-static void bnx2x_turn_off_sf(struct bnx2x *bp)
+static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
{
/* put sf to reset */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_LOW);
bnx2x_set_gpio(bp,
- MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_LOW);
+ MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_LOW,
+ port);
+ bnx2x_set_gpio(bp,
+ MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_LOW,
+ port);
}
u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
@@ -3253,7 +3463,8 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
/* Take ext phy out of reset */
if (!driver_loaded)
- bnx2x_turn_on_sf(bp, params->port, ext_phy_addr);
+ bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
+ ext_phy_type);
/* wait for 1ms */
msleep(1);
@@ -3276,11 +3487,16 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
version[4] = '\0';
if (!driver_loaded)
- bnx2x_turn_off_sf(bp);
+ bnx2x_turn_off_sf(bp, params->port);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
{
+ /* Take ext phy out of reset */
+ if (!driver_loaded)
+ bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
+ ext_phy_type);
+
bnx2x_cl45_read(bp, params->port, ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
@@ -3333,7 +3549,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params,
struct bnx2x *bp = params->bp;
if (is_10g) {
- u32 md_devad;
+ u32 md_devad;
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
@@ -3553,6 +3769,8 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
u16 hw_led_mode, u32 chip_id)
{
u8 rc = 0;
+ u32 tmp;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
speed, hw_led_mode);
@@ -3561,6 +3779,9 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
SHARED_HW_CFG_LED_MAC1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
break;
case LED_MODE_OPER:
@@ -3572,6 +3793,10 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
LED_BLINK_RATE_VAL);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
port*4, 1);
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp & (~EMAC_LED_OVERRIDE)));
+
if (!CHIP_IS_E1H(bp) &&
((speed == SPEED_2500) ||
(speed == SPEED_1000) ||
@@ -3622,7 +3847,8 @@ static u8 bnx2x_link_initialize(struct link_params *params,
struct bnx2x *bp = params->bp;
u8 port = params->port;
u8 rc = 0;
-
+ u8 non_ext_phy;
+ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
/* Activate the external PHY */
bnx2x_ext_phy_reset(params, vars);
@@ -3644,10 +3870,6 @@ static u8 bnx2x_link_initialize(struct link_params *params,
bnx2x_set_swap_lanes(params);
}
- /* Set Parallel Detect */
- if (params->req_line_speed == SPEED_AUTO_NEG)
- bnx2x_set_parallel_detection(params, vars->phy_flags);
-
if (vars->phy_flags & PHY_XGXS_FLAG) {
if (params->req_line_speed &&
((params->req_line_speed == SPEED_100) ||
@@ -3657,68 +3879,33 @@ static u8 bnx2x_link_initialize(struct link_params *params,
vars->phy_flags &= ~PHY_SGMII_FLAG;
}
}
+	/* In case of external phy existence, the line speed would be the
+	line speed linked up by the external phy. In case it is direct only,
+	then the line_speed during initialization will be equal to the
+	req_line_speed*/
+ vars->line_speed = params->req_line_speed;
- if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
- u16 bank, rx_eq;
-
- rx_eq = ((params->serdes_config &
- PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
- PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
+ bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
- DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
- for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
- bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
- CL45_WR_OVER_CL22(bp, port,
- params->phy_addr,
- bank ,
- MDIO_RX0_RX_EQ_BOOST,
- ((rx_eq &
- MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
- MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
- }
-
- /* forced speed requested? */
- if (params->req_line_speed != SPEED_AUTO_NEG) {
- DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
-
- /* disable autoneg */
- bnx2x_set_autoneg(params, vars);
-
- /* program speed and duplex */
- bnx2x_program_serdes(params);
- vars->ieee_fc =
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
-
- } else { /* AN_mode */
- DP(NETIF_MSG_LINK, "not SGMII, AN\n");
-
- /* AN enabled */
- bnx2x_set_brcm_cl37_advertisment(params);
-
- /* program duplex & pause advertisement (for aneg) */
- bnx2x_set_ieee_aneg_advertisment(params,
- &vars->ieee_fc);
-
- /* enable autoneg */
- bnx2x_set_autoneg(params, vars);
-
- /* enable and restart AN */
- bnx2x_restart_autoneg(params);
- }
-
- } else { /* SGMII mode */
- DP(NETIF_MSG_LINK, "SGMII\n");
-
- bnx2x_initialize_sgmii_process(params);
+ /* init ext phy and enable link state int */
+ non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
+ (params->loopback_mode == LOOPBACK_XGXS_10) ||
+ (params->loopback_mode == LOOPBACK_EXT_PHY));
+
+ if (non_ext_phy ||
+ (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) {
+ if (params->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_set_parallel_detection(params, vars->phy_flags);
+ bnx2x_init_internal_phy(params, vars);
}
- /* init ext phy and enable link state int */
- rc |= bnx2x_ext_phy_init(params, vars);
+ if (!non_ext_phy)
+ rc |= bnx2x_ext_phy_init(params, vars);
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
return rc;
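Editor's note: the reworked initialization above introduces a non_ext_phy predicate: the internal XGXS phy is brought up when the path is direct, in XGXS/ext-phy loopback, or behind the 8705, and the external phy init is skipped exactly in the non_ext_phy case. A compact sketch of that decision, with illustrative enum values standing in for the hardware-config constants:

#include <stdio.h>

/* illustrative constants; the real values live in the HW config headers */
enum ext_phy  { EXT_PHY_DIRECT, EXT_PHY_BCM8705, EXT_PHY_BCM8073 };
enum loopback { LOOPBACK_NONE, LOOPBACK_XGXS_10, LOOPBACK_EXT_PHY };

#define INIT_INTERNAL_PHY	(1 << 0)
#define INIT_EXTERNAL_PHY	(1 << 1)

static unsigned int phys_to_init(enum ext_phy type, enum loopback mode)
{
	int non_ext_phy = (type == EXT_PHY_DIRECT) ||
			  (mode == LOOPBACK_XGXS_10) ||
			  (mode == LOOPBACK_EXT_PHY);
	unsigned int what = 0;

	/* internal phy when nothing external is usable (or for the 8705) */
	if (non_ext_phy || type == EXT_PHY_BCM8705)
		what |= INIT_INTERNAL_PHY;
	/* external phy only when one is really in the path */
	if (!non_ext_phy)
		what |= INIT_EXTERNAL_PHY;
	return what;
}

int main(void)
{
	printf("8073, no loopback -> 0x%x\n",
	       phys_to_init(EXT_PHY_BCM8073, LOOPBACK_NONE));	/* external only */
	return 0;
}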
@@ -3730,15 +3917,23 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
struct bnx2x *bp = params->bp;
u32 val;
- DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+ DP(NETIF_MSG_LINK, "Phy Initialization started \n");
DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n",
params->req_line_speed, params->req_flow_ctrl);
vars->link_status = 0;
+ vars->phy_link_up = 0;
+ vars->link_up = 0;
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+
if (params->switch_cfg == SWITCH_CFG_1G)
vars->phy_flags = PHY_SERDES_FLAG;
else
vars->phy_flags = PHY_XGXS_FLAG;
+
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -3894,6 +4089,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
}
bnx2x_link_initialize(params, vars);
+ msleep(30);
bnx2x_link_int_enable(params);
}
return 0;
@@ -3943,39 +4139,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
/* HW reset */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
DP(NETIF_MSG_LINK, "reset external PHY\n");
- } else {
-
- u8 ext_phy_addr = ((ext_phy_config &
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
-
- /* SW reset */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
-
- /* Set Low Power Mode */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<11);
-
-
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- DP(NETIF_MSG_LINK, "Setting 8073 port %d into"
+ } else if (ext_phy_type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
+ DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
"low power mode\n",
port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
- }
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
}
}
/* reset the SerDes/XGXS */
@@ -3995,6 +4174,73 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
return 0;
}
+static u8 bnx2x_update_link_down(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+ bnx2x_set_led(bp, port, LED_MODE_OFF,
+ 0, params->hw_led_mode,
+ params->chip_id);
+
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+
+ /* update shared memory */
+ vars->link_status = 0;
+ vars->line_speed = 0;
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* reset BigMac */
+ bnx2x_bmac_rx_disable(bp, params->port);
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ return 0;
+}
+
+static u8 bnx2x_update_link_up(struct link_params *params,
+ struct link_vars *vars,
+ u8 link_10g, u32 gp_status)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u8 rc = 0;
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ if (link_10g) {
+ bnx2x_bmac_enable(params, vars, 0);
+ bnx2x_set_led(bp, port, LED_MODE_OPER,
+ SPEED_10000, params->hw_led_mode,
+ params->chip_id);
+
+ } else {
+ bnx2x_emac_enable(params, vars, 0);
+ rc = bnx2x_emac_program(params, vars->line_speed,
+ vars->duplex);
+
+ /* AN complete? */
+ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
+ if (!(vars->phy_flags &
+ PHY_SGMII_FLAG))
+ bnx2x_set_sgmii_tx_driver(params);
+ }
+ }
+
+ /* PBF - link up */
+ rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+
+ /* disable drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+ /* update shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ return rc;
+}
/* This function should called upon link interrupt */
/* In case vars->link_up, driver needs to
1. Update the pbf
@@ -4012,10 +4258,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
- u16 i;
u16 gp_status;
- u16 link_10g;
- u8 rc = 0;
+ u8 link_10g;
+ u8 ext_phy_link_up, rc = 0;
+ u32 ext_phy_type;
DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
port,
@@ -4031,15 +4277,16 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* avoid fast toggling */
- for (i = 0; i < 10; i++) {
- msleep(10);
- CL45_RD_OVER_CL22(bp, port, params->phy_addr,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
- }
+ /* Check external link change only for non-direct */
+ ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars);
+
+ /* Read gp_status */
+ CL45_RD_OVER_CL22(bp, port, params->phy_addr,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
rc = bnx2x_link_settings_status(params, vars, gp_status);
if (rc != 0)
@@ -4055,73 +4302,177 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
bnx2x_link_int_ack(params, vars, link_10g);
+	/* In case the external phy link is up and the internal link is down
+	(not initialized yet, probably right after link initialization), it
+	needs to be initialized.
+	Note that after a link down-up as a result of a cable plug,
+	the xgxs link would probably come up again without the need to
+	initialize it */
+
+ if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
+ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
+ (ext_phy_link_up && !vars->phy_link_up))
+ bnx2x_init_internal_phy(params, vars);
+
/* link is up only if both local phy and external phy are up */
- vars->link_up = (vars->phy_link_up &&
- bnx2x_ext_phy_is_link_up(params, vars));
+ vars->link_up = (ext_phy_link_up && vars->phy_link_up);
- if (!vars->phy_link_up &&
- REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18)) {
- bnx2x_ext_phy_is_link_up(params, vars); /* Clear interrupt */
+ if (vars->link_up)
+ rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
+ else
+ rc = bnx2x_update_link_down(params, vars);
+
+ return rc;
+}
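Editor's note: bnx2x_link_update() now computes the overall link as the AND of the internal and external phy link and hands the rest to the two new helpers. A tiny sketch of that dispatch:

#include <stdio.h>

static const char *link_update(int phy_link_up, int ext_phy_link_up)
{
	/* overall link only when both the XGXS/SerDes side and the
	   external phy report up */
	int link_up = phy_link_up && ext_phy_link_up;

	return link_up ? "update_link_up()" : "update_link_down()";
}

int main(void)
{
	printf("internal up, external down -> %s\n", link_update(1, 0));
	return 0;
}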
+
+static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+{
+ u8 ext_phy_addr[PORT_MAX];
+ u16 val;
+ s8 port;
+
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* Extract the ext phy address for the port */
+ u32 ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config));
+
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ ext_phy_addr[port] =
+ ((ext_phy_config &
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+
+ /* Need to take the phy out of low power mode in order
+ to write to access its registers */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+
+ /* Reset the phy */
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
}
- if (vars->link_up) {
- vars->link_status |= LINK_STATUS_LINK_UP;
- if (link_10g) {
- bnx2x_bmac_enable(params, vars, 0);
- bnx2x_set_led(bp, port, LED_MODE_OPER,
- SPEED_10000, params->hw_led_mode,
- params->chip_id);
+ /* Add delay of 150ms after reset */
+ msleep(150);
- } else {
- bnx2x_emac_enable(params, vars, 0);
- rc = bnx2x_emac_program(params, vars->line_speed,
- vars->duplex);
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u16 fw_ver1;
- /* AN complete? */
- if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
- if (!(vars->phy_flags &
- PHY_SGMII_FLAG))
- bnx2x_set_sgmii_tx_driver(params);
- }
+ bnx2x_bcm8073_external_rom_boot(bp, port,
+ ext_phy_addr[port]);
+
+ bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ if (fw_ver1 == 0) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_common_init_phy port %x "
+ "fw Download failed\n", port);
+ return -EINVAL;
}
- /* PBF - link up */
- rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
- vars->line_speed);
+ /* Only set bit 10 = 1 (Tx power down) */
+ bnx2x_cl45_read(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
- /* disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+ /* Phase1 of TX_POWER_DOWN reset */
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
+ }
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
+ /* Toggle Transmitter: Power down and then up with 600ms
+ delay between */
+ msleep(600);
- } else { /* link down */
- DP(NETIF_MSG_LINK, "Port %x: Link is down\n", params->port);
- bnx2x_set_led(bp, port, LED_MODE_OFF,
- 0, params->hw_led_mode,
- params->chip_id);
+ /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* Phase2 of POWER_DOWN_RESET*/
+ /* Release bit 10 (Release Tx power down) */
+ bnx2x_cl45_read(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
- /* indicate no mac active */
- vars->mac_type = MAC_TYPE_NONE;
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ msleep(15);
- /* update shared memory */
- vars->link_status = 0;
- bnx2x_update_mng(params, vars->link_status);
+ /* Read modify write the SPI-ROM version select register */
+ bnx2x_cl45_read(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
- /* activate nig drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+ /* set GPIO2 back to LOW */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ }
+ return 0;
- /* reset BigMac */
- bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+}
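Editor's note: the common 8073 bring-up above is structured as three passes over both ports: reset, firmware download plus TX power-down, then power-down release and GPIO2 drop, with the long waits placed between passes rather than inside them, so each delay is paid once and not once per port. A sketch of that phased structure (printouts stand in for the real register writes, and the sleeps are only indicated in comments):

#include <stdio.h>

#define PORT_MAX 2

static void common_init_8073(void)
{
	int port;

	for (port = PORT_MAX - 1; port >= 0; port--)
		printf("phase 1: take port %d phy out of low power, reset it\n",
		       port);
	/* msleep(150) between phases in the real code */

	for (port = PORT_MAX - 1; port >= 0; port--)
		printf("phase 2: download firmware, set TX power-down on port %d\n",
		       port);
	/* msleep(600) transmitter toggle delay */

	for (port = PORT_MAX - 1; port >= 0; port--)
		printf("phase 3: release TX power-down, drop GPIO2 on port %d\n",
		       port);
}

int main(void)
{
	common_init_8073();
	return 0;
}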
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+{
+ u8 rc = 0;
+ u32 ext_phy_type;
+
+ DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n");
+
+ /* Read the ext_phy_type for arbitrary port(0) */
+ ext_phy_type = XGXS_EXT_PHY_TYPE(
+ REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].external_phy_config)));
+
+ switch (ext_phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ {
+ rc = bnx2x_8073_common_init_phy(bp, shmem_base);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK,
+ "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
+ ext_phy_type);
+ break;
}
return rc;
}
+
+
static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
{
u16 val, cnt;
@@ -4154,7 +4505,7 @@ static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
}
#define RESERVED_SIZE 256
/* max application is 160K bytes - data at end of RAM */
-#define MAX_APP_SIZE 160*1024 - RESERVED_SIZE
+#define MAX_APP_SIZE (160*1024 - RESERVED_SIZE)
/* Header is 14 bytes */
#define HEADER_SIZE 14
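Editor's note: the MAX_APP_SIZE fix above is the usual macro-parenthesization issue: without the parentheses the subtraction binds to whatever expression the macro is expanded into. A small, runnable illustration:

#include <stdio.h>

#define RESERVED_SIZE 256
/* unparenthesised, as before the fix */
#define MAX_APP_SIZE_OLD	160*1024 - RESERVED_SIZE
/* parenthesised, as after the fix */
#define MAX_APP_SIZE_NEW	(160*1024 - RESERVED_SIZE)

int main(void)
{
	/* expansion order bites as soon as the macro is used inside a
	   larger expression: 2*MAX_APP_SIZE_OLD expands to
	   2*160*1024 - 256, which is not twice the application size */
	printf("old: %d\n", 2*MAX_APP_SIZE_OLD);	/* 327424 */
	printf("new: %d\n", 2*MAX_APP_SIZE_NEW);	/* 327168 */
	return 0;
}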
@@ -4192,12 +4543,12 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
size = MAX_APP_SIZE+HEADER_SIZE;
}
DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]);
- DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]);
+ DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]);
/* Put the DSP in download mode by setting FLASH_CFG[2] to 1
and issuing a reset.*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH);
+ MISC_REGISTERS_GPIO_HIGH, port);
bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
@@ -4429,7 +4780,8 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
}
/* DSP Remove Download Mode */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, MISC_REGISTERS_GPIO_LOW);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_LOW, port);
bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
@@ -4437,7 +4789,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
for (cnt = 0; cnt < 100; cnt++)
msleep(5);
- bnx2x_hw_reset(bp);
+ bnx2x_hw_reset(bp, port);
for (cnt = 0; cnt < 100; cnt++)
msleep(5);
@@ -4473,7 +4825,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
MDIO_PMA_REG_7101_VER2,
&image_revision2);
- if (data[0x14e] != (image_revision2&0xFF) ||
+ if (data[0x14e] != (image_revision2&0xFF) ||
data[0x14f] != ((image_revision2&0xFF00)>>8) ||
data[0x150] != (image_revision1&0xFF) ||
data[0x151] != ((image_revision1&0xFF00)>>8)) {
@@ -4508,11 +4860,11 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
/* Take ext phy out of reset */
if (!driver_loaded)
- bnx2x_turn_on_sf(bp, port, ext_phy_addr);
+ bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type);
rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr,
data, size);
if (!driver_loaded)
- bnx2x_turn_off_sf(bp);
+ bnx2x_turn_off_sf(bp, port);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index 714d37ac95de..86d54a17b411 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -55,14 +55,17 @@ struct link_params {
#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS_10 3
#define LOOPBACK_EXT_PHY 4
+#define LOOPBACK_EXT 5
u16 req_duplex;
u16 req_flow_ctrl;
+ u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+ req_flow_ctrl is set to AUTO */
u16 req_line_speed; /* Also determine AutoNeg */
/* Device parameters */
u8 mac_addr[6];
- u16 mtu;
+
/* shmem parameters */
@@ -140,7 +143,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
u8 phy_addr, u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
- and update the link vars accordinaly */
+ and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
struct link_vars *output);
/* returns string representing the fw_version of the external phy */
@@ -149,7 +152,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
/* Set/Unset the led
Basically, the CLC takes care of the led for the link, but in case one needs
- to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to
+ to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
blink the led, and LED_MODE_OFF to set the led off.*/
u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
u16 hw_led_mode, u32 chip_id);
@@ -164,5 +167,7 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
otherwise link is down*/
u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
+/* One-time initialization for external phy after power up */
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 272a4bd25953..3e7dc171cdf1 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -60,8 +60,8 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
-#define DRV_MODULE_VERSION "1.45.6"
-#define DRV_MODULE_RELDATE "2008/06/23"
+#define DRV_MODULE_VERSION "1.45.17"
+#define DRV_MODULE_RELDATE "2008/08/13"
#define BNX2X_BC_VER 0x040200
/* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +76,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
+static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
-static int disable_tpa;
-static int nomcp;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;
+module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
-module_param(disable_tpa, int, 0);
-module_param(nomcp, int, 0);
+MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
-MODULE_PARM_DESC(nomcp, "ignore management CPU");
#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
@@ -237,17 +235,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
while (*wb_comp != DMAE_COMP_VAL) {
DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
- /* adjust delay for emulation/FPGA */
- if (CHIP_REV_IS_SLOW(bp))
- msleep(100);
- else
- udelay(5);
-
if (!cnt) {
BNX2X_ERR("dmae timeout!\n");
break;
}
cnt--;
+ /* adjust delay for emulation/FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(100);
+ else
+ udelay(5);
}
mutex_unlock(&bp->dmae_mutex);
@@ -310,17 +307,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
while (*wb_comp != DMAE_COMP_VAL) {
- /* adjust delay for emulation/FPGA */
- if (CHIP_REV_IS_SLOW(bp))
- msleep(100);
- else
- udelay(5);
-
if (!cnt) {
BNX2X_ERR("dmae timeout!\n");
break;
}
cnt--;
+ /* adjust delay for emulation/FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(100);
+ else
+ udelay(5);
}
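Editor's note: both DMAE wait loops above are reordered so the budget is checked before sleeping; a timeout is now reported without paying one final delay, and cnt is decremented before each sleep. A sketch of the resulting poll-with-timeout shape, with a no-op standing in for msleep()/udelay():

#include <stdio.h>

static void no_delay(void) { }	/* stands in for msleep()/udelay() */

/* check the budget first, then decrement, then sleep: the timeout path
   is taken immediately once the budget is exhausted, and at most cnt
   sleeps happen before giving up */
static int wait_polled(volatile int *done, int cnt, void (*delay_fn)(void))
{
	while (!*done) {
		if (!cnt) {
			fprintf(stderr, "dmae-style timeout!\n");
			return -1;
		}
		cnt--;
		delay_fn();
	}
	return 0;
}

int main(void)
{
	volatile int done = 1;	/* already complete: no delay is paid */

	return wait_polled(&done, 5, no_delay);
}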
DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
@@ -503,6 +499,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
int i;
u16 j, start, end;
+ bp->stats_state = STATS_STATE_DISABLED;
+ DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
BNX2X_ERR("begin crash dump -----------------\n");
for_each_queue(bp, i) {
@@ -513,17 +512,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
" tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
- BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)"
- " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)"
- " rx_sge_prod(%x) last_max_sge(%x)\n",
- fp->rx_comp_prod, fp->rx_comp_cons,
- le16_to_cpu(*fp->rx_cons_sb),
- le16_to_cpu(*fp->rx_bd_cons_sb),
- fp->rx_sge_prod, fp->last_max_sge);
- BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)"
- " bd data(%x,%x) rx_alloc_failed(%lx)\n",
- fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
- hw_prods->bds_prod, fp->rx_alloc_failed);
+ BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
+ " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
+ " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+ fp->rx_bd_prod, fp->rx_bd_cons,
+ le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+ fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+ BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
+ " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
+ " *sb_u_idx(%x) bd data(%x,%x)\n",
+ fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
+ fp->status_blk->c_status_block.status_block_index,
+ fp->fp_u_idx,
+ fp->status_blk->u_status_block.status_block_index,
+ hw_prods->packets_prod, hw_prods->bds_prod);
start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +555,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
j, rx_bd[1], rx_bd[0], sw_bd->skb);
}
- start = 0;
- end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
+ start = RX_SGE(fp->rx_sge_prod);
+ end = RX_SGE(fp->last_max_sge);
for (j = start; j < end; j++) {
u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +584,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
bnx2x_fw_dump(bp);
bnx2x_mc_assert(bp);
BNX2X_ERR("end crash dump -----------------\n");
-
- bp->stats_state = STATS_STATE_DISABLED;
- DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +683,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp)
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
u8 storm, u16 index, u8 op, u8 update)
{
- u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+ COMMAND_REG_INT_ACK);
struct igu_ack_register igu_ack;
igu_ack.status_block_index = index;
@@ -694,9 +694,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
(update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
(op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
- DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
- (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
- REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
+ DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+ (*(u32 *)&igu_ack), hc_addr);
+ REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -716,36 +716,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
return rc;
}
-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
-{
- u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-
- if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
- rx_cons_sb++;
-
- if ((fp->rx_comp_cons != rx_cons_sb) ||
- (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
- (fp->tx_pkt_prod != fp->tx_pkt_cons))
- return 1;
-
- return 0;
-}
-
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
- u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
- u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+ COMMAND_REG_SIMD_MASK);
+ u32 result = REG_RD(bp, hc_addr);
- DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
- result, BAR_IGU_INTMEM + igu_addr);
+ DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+ result, hc_addr);
-#ifdef IGU_DEBUG
-#warning IGU_DEBUG active
- if (result == 0) {
- BNX2X_ERR("read %x from IGU\n", result);
- REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
- }
-#endif
return result;
}
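Editor's note: several hunks in this patch move interrupt acknowledgement from the IGU BAR to per-port HC command registers, computing the address as base + port*32 + command offset. A sketch of that addressing (the base and offsets here are placeholders; the real values come from the chip's register headers):

#include <stdint.h>
#include <stdio.h>

#define HC_REG_COMMAND_REG	0x1000	/* placeholder base */
#define COMMAND_REG_INT_ACK	0x0
#define COMMAND_REG_SIMD_MASK	0x4

/* each port owns a 32-byte window of HC command registers */
static uint32_t hc_addr(int port, uint32_t command_reg)
{
	return HC_REG_COMMAND_REG + port * 32 + command_reg;
}

int main(void)
{
	printf("port 1 SIMD mask reg at 0x%x\n",
	       hc_addr(1, COMMAND_REG_SIMD_MASK));
	return 0;
}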
@@ -898,6 +877,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
netif_tx_lock(bp->dev);
if (netif_queue_stopped(bp->dev) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
(bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
netif_wake_queue(bp->dev);
@@ -905,6 +885,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
}
}
+
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
union eth_rx_cqe *rr_cqe)
{
@@ -960,6 +941,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
break;
+
case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1169,8 +1151,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
memset(fp->sge_mask, 0xff,
(NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
- /* Clear the two last indeces in the page to 1:
- these are the indeces that correspond to the "next" element,
+ /* Clear the two last indices in the page to 1:
+ these are the indices that correspond to the "next" element,
hence will never be indicated and should be removed from
the calculations. */
bnx2x_clear_sge_mask_next_elems(fp);
@@ -1261,7 +1243,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
where we are and drop the whole packet */
err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
if (unlikely(err)) {
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
return err;
}
@@ -1297,14 +1279,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
- /* if alloc failed drop the packet and keep the buffer in the bin */
if (likely(new_skb)) {
+ /* fix ip xsum and give it to the stack */
+ /* (no need to map the new skb) */
prefetch(skb);
prefetch(((char *)(skb)) + 128);
- /* else fix ip xsum and give it to the stack */
- /* (no need to map the new skb) */
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > bp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
@@ -1353,9 +1334,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
fp->tpa_pool[queue].skb = new_skb;
} else {
+ /* else drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate new skb - dropping packet!\n");
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
}
fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1372,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
int rx_pkt = 0;
- u16 queue;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -1456,7 +1437,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if ((!fp->disable_tpa) &&
(TPA_TYPE(cqe_fp_flags) !=
(TPA_TYPE_START | TPA_TYPE_END))) {
- queue = cqe->fast_path_cqe.queue_index;
+ u16 queue = cqe->fast_path_cqe.queue_index;
if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1484,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
/* is this an error packet? */
if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
- /* do we sometimes forward error packets anyway? */
DP(NETIF_MSG_RX_ERR,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
- /* TBD make sure MC counts this as a drop */
+ bp->eth_stats.rx_err_discard_pkt++;
goto reuse_rx;
}
@@ -1524,7 +1504,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped "
"because of alloc failure\n");
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
goto reuse_rx;
}
@@ -1550,7 +1530,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped because "
"of alloc failure\n");
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
goto next_rx;
@@ -1559,10 +1539,12 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_NONE;
- if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* TBD do we pass bad csum packets in promisc */
+ if (bp->rx_csum) {
+ if (likely(BNX2X_RX_CSUM_OK(cqe)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ bp->eth_stats.hw_csum_err++;
+ }
}
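Editor's note: the receive path above now distinguishes a failed hardware checksum from checksum offload being disabled, and counts the failure in eth_stats.hw_csum_err instead of silently leaving the skb at CHECKSUM_NONE. A minimal sketch of that branch:

#include <stdint.h>

enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY };

struct rx_stats_sketch {
	uint64_t hw_csum_err;
};

static int rx_csum_fixup(int rx_csum_enabled, int hw_csum_ok,
			 struct rx_stats_sketch *stats)
{
	int ip_summed = CHECKSUM_NONE;

	if (rx_csum_enabled) {
		if (hw_csum_ok)
			ip_summed = CHECKSUM_UNNECESSARY;
		else
			stats->hw_csum_err++;	/* bad csum is now counted */
	}
	return ip_summed;
}

int main(void)
{
	struct rx_stats_sketch stats = { 0 };

	rx_csum_fixup(1, 0, &stats);	/* offload on, bad csum */
	return (int)stats.hw_csum_err;	/* 1 */
}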
#ifdef BCM_VLAN
@@ -1615,6 +1597,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
struct net_device *dev = bp->dev;
int index = FP_IDX(fp);
+ /* Return here if interrupt is disabled */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+ return IRQ_HANDLED;
+ }
+
DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
index, FP_SB_ID(fp));
bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1648,17 +1636,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
}
DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
-#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
- return IRQ_HANDLED;
-#endif
-
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
return IRQ_HANDLED;
}
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
mask = 0x2 << bp->fp[0].sb_id;
if (status & mask) {
struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1687,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
* General service functions
*/
-static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
u32 lock_status;
u32 resource_bit = (1 << resource);
- u8 port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
int cnt;
/* Validating that the resource is within range */
@@ -1714,8 +1703,15 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
return -EINVAL;
}
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
/* Validating that the resource is not already taken */
- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
if (lock_status & resource_bit) {
DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
lock_status, resource_bit);
@@ -1725,9 +1721,8 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
/* Try for 1 second every 5ms */
for (cnt = 0; cnt < 200; cnt++) {
/* Try to acquire the lock */
- REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
- resource_bit);
- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
if (lock_status & resource_bit)
return 0;
@@ -1737,11 +1732,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
return -EAGAIN;
}
-static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
u32 lock_status;
u32 resource_bit = (1 << resource);
- u8 port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1747,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
return -EINVAL;
}
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
/* Validating that the resource is currently taken */
- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
lock_status, resource_bit);
return -EFAULT;
}
- REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
+ REG_WR(bp, hw_lock_control_reg, resource_bit);
return 0;
}
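Editor's note: the lock helpers above switch from a per-port to a per-function lock register: functions 0..5 use DRIVER_CONTROL_1 onwards and functions 6..7 continue at DRIVER_CONTROL_7, consecutive registers spaced 8 apart. A sketch of that selection (the register addresses below are illustrative, not the chip's):

#include <stdint.h>
#include <stdio.h>

#define MISC_REG_DRIVER_CONTROL_1	0xa500	/* placeholder */
#define MISC_REG_DRIVER_CONTROL_7	0xa3c8	/* placeholder */

static uint32_t hw_lock_control_reg(int func)
{
	if (func <= 5)
		return MISC_REG_DRIVER_CONTROL_1 + func * 8;
	return MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8;
}

int main(void)
{
	int func;

	for (func = 0; func < 8; func++)
		printf("func %d -> 0x%x\n", func, hw_lock_control_reg(func));
	return 0;
}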
/* HW Lock for shared dual port PHYs */
-static void bnx2x_phy_hw_lock(struct bnx2x *bp)
+static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
@@ -1772,25 +1775,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp)
if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
(ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}
-static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
+static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
(ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
mutex_unlock(&bp->port.phy_mutex);
}
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
/* The GPIO should be swapped if swap register is set and active */
int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
- REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
int gpio_shift = gpio_num +
(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1804,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
return -EINVAL;
}
- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* read GPIO and mask except the float bits */
gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
@@ -1822,7 +1825,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
break;
- case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
gpio_num, gpio_shift);
/* set FLOAT */
@@ -1834,7 +1837,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
}
REG_WR(bp, MISC_REG_GPIO, gpio_reg);
- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
return 0;
}
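Editor's note: bnx2x_set_gpio() now takes the port explicitly and still applies the swap-strap correction before picking the per-port bit. A sketch of that bit-position computation, with an assumed value for the port shift:

#include <stdio.h>

#define GPIO_PORT_SHIFT 4	/* assumed stand-in for MISC_REGISTERS_GPIO_PORT_SHIFT */

/* the GPIO bank is shared by both ports; when the swap strap and the
   override are both active, the caller's port is flipped before the
   per-port bit position is chosen */
static unsigned int gpio_mask(int gpio_num, int port,
			      int port_swap, int strap_override)
{
	int gpio_port = (port_swap && strap_override) ^ port;
	int gpio_shift = gpio_num + (gpio_port ? GPIO_PORT_SHIFT : 0);

	return 1u << gpio_shift;
}

int main(void)
{
	/* GPIO 2 on port 1, no swap: bit 6 */
	printf("mask = 0x%x\n", gpio_mask(2, 1, 0, 0));
	return 0;
}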
@@ -1850,19 +1853,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
return -EINVAL;
}
- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/* read SPIO and mask except the float bits */
spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
switch (mode) {
- case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+ case MISC_REGISTERS_SPIO_OUTPUT_LOW:
DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
/* clear FLOAT and set CLR */
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
break;
- case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+ case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
/* clear FLOAT and set SET */
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1883,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
}
REG_WR(bp, MISC_REG_SPIO, spio_reg);
- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
return 0;
}
@@ -1940,46 +1943,63 @@ static void bnx2x_link_report(struct bnx2x *bp)
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
- u8 rc;
+ if (!BP_NOMCP(bp)) {
+ u8 rc;
- /* Initialize link parameters structure variables */
- bp->link_params.mtu = bp->dev->mtu;
+ /* Initialize link parameters structure variables */
+ /* It is recommended to turn off RX FC for jumbo frames
+ for better performance */
+ if (IS_E1HMF(bp))
+ bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
+ else if (bp->dev->mtu > 5000)
+ bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
+ else
+ bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
- bnx2x_phy_hw_lock(bp);
- rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
- if (bp->link_vars.link_up)
- bnx2x_link_report(bp);
+ if (bp->link_vars.link_up)
+ bnx2x_link_report(bp);
- bnx2x_calc_fc_adv(bp);
+ bnx2x_calc_fc_adv(bp);
- return rc;
+ return rc;
+ }
+	BNX2X_ERR("Bootcode is missing - not initializing link\n");
+ return -EINVAL;
}
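Editor's note: the guarded initial phy init above also picks the auto-advertised flow control: both directions in multi-function mode, TX-only above a 5000-byte MTU (per the hunk's comment, RX pause off for jumbo frames performs better), and both directions otherwise. A sketch of that policy with illustrative flow-control constants:

/* illustrative values; the real FLOW_CTRL_* flags come from the driver headers */
#define FLOW_CTRL_TX	1
#define FLOW_CTRL_BOTH	3

static int pick_fc_auto_adv(int is_mf, int mtu)
{
	if (is_mf)
		return FLOW_CTRL_BOTH;
	/* 5000-byte threshold taken directly from the hunk */
	return (mtu > 5000) ? FLOW_CTRL_TX : FLOW_CTRL_BOTH;
}

int main(void)
{
	return pick_fc_auto_adv(0, 9000);	/* jumbo MTU: TX-only pause */
}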
static void bnx2x_link_set(struct bnx2x *bp)
{
- bnx2x_phy_hw_lock(bp);
- bnx2x_phy_init(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
- bnx2x_calc_fc_adv(bp);
+ bnx2x_calc_fc_adv(bp);
+ } else
+		BNX2X_ERR("Bootcode is missing - not setting link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
- bnx2x_phy_hw_lock(bp);
- bnx2x_link_reset(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ } else
+		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
u8 rc;
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
return rc;
}
@@ -1991,7 +2011,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp)
sum of vn_min_rates
or
0 - if all the min_rates are 0.
- In the later case fainess algorithm should be deactivated.
+ In the later case fairness algorithm should be deactivated.
If not all min_rates are zero then those that are zeroes will
be set to 1.
*/
@@ -2114,7 +2134,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
/* If FAIRNESS is enabled (not all min rates are zeroes) and
if current min rate is zero - set it to 1.
- This is a requirment of the algorithm. */
+ This is a requirement of the algorithm. */
if ((vn_min_rate == 0) && wsum)
vn_min_rate = DEF_MIN_RATE;
vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2223,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
bnx2x_link_update(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
if (bp->link_vars.link_up) {
@@ -2357,7 +2377,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
}
/* acquire split MCP access lock register */
-static int bnx2x_lock_alr(struct bnx2x *bp)
+static int bnx2x_acquire_alr(struct bnx2x *bp)
{
u32 i, j, val;
int rc = 0;
@@ -2374,15 +2394,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp)
msleep(5);
}
if (!(val & (1L << 31))) {
- BNX2X_ERR("Cannot acquire nvram interface\n");
+ BNX2X_ERR("Cannot acquire MCP access lock register\n");
rc = -EBUSY;
}
return rc;
}
-/* Release split MCP access lock register */
-static void bnx2x_unlock_alr(struct bnx2x *bp)
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
{
u32 val = 0;
@@ -2395,7 +2415,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
u16 rc = 0;
barrier(); /* status block is written to by the chip */
-
if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
rc |= 1;
@@ -2426,26 +2445,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
int port = BP_PORT(bp);
- int func = BP_FUNC(bp);
- u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
+ u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_SET);
u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
MISC_REG_AEU_MASK_ATTN_FUNC_0;
u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
NIG_REG_MASK_INTERRUPT_PORT0;
+ u32 aeu_mask;
- if (~bp->aeu_mask & (asserted & 0xff))
- BNX2X_ERR("IGU ERROR\n");
if (bp->attn_state & asserted)
BNX2X_ERR("IGU ERROR\n");
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, aeu_addr);
+
DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
- bp->aeu_mask, asserted);
- bp->aeu_mask &= ~(asserted & 0xff);
- DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+ aeu_mask, asserted);
+ aeu_mask &= ~(asserted & 0xff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
- REG_WR(bp, aeu_addr, bp->aeu_mask);
+ REG_WR(bp, aeu_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
bp->attn_state |= asserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
if (asserted & ATTN_HARD_WIRED_MASK) {
if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2500,9 +2524,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
} /* if hardwired */
- DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
- asserted, BAR_IGU_INTMEM + igu_addr);
- REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+ asserted, hc_addr);
+ REG_WR(bp, hc_addr, asserted);
/* now set back the mask */
if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2530,12 +2554,12 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
/* Fan failure attention */
- /* The PHY reset is controled by GPIO 1 */
+ /* The PHY reset is controlled by GPIO 1 */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
- /* Low power mode is controled by GPIO 2 */
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ /* Low power mode is controlled by GPIO 2 */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
/* mark the failure */
bp->link_params.ext_phy_config &=
~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2723,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
int index;
u32 reg_addr;
u32 val;
+ u32 aeu_mask;
/* need to take HW lock because MCP or other port might also
try to handle this event */
- bnx2x_lock_alr(bp);
+ bnx2x_acquire_alr(bp);
attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2759,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
HW_PRTY_ASSERT_SET_1) ||
(attn.sig[2] & group_mask.sig[2] &
HW_PRTY_ASSERT_SET_2))
- BNX2X_ERR("FATAL HW block parity attention\n");
+ BNX2X_ERR("FATAL HW block parity attention\n");
}
}
- bnx2x_unlock_alr(bp);
+ bnx2x_release_alr(bp);
- reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+ reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
val = ~deasserted;
-/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
- val, BAR_IGU_INTMEM + reg_addr); */
- REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+ val, reg_addr);
+ REG_WR(bp, reg_addr, val);
- if (bp->aeu_mask & (deasserted & 0xff))
- BNX2X_ERR("IGU BUG!\n");
if (~bp->attn_state & deasserted)
- BNX2X_ERR("IGU BUG!\n");
+ BNX2X_ERR("IGU ERROR\n");
reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
MISC_REG_AEU_MASK_ATTN_FUNC_0;
- DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
- bp->aeu_mask |= (deasserted & 0xff);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, reg_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+ aeu_mask, deasserted);
+ aeu_mask |= (deasserted & 0xff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
- DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
- REG_WR(bp, reg_addr, bp->aeu_mask);
+ REG_WR(bp, reg_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
bp->attn_state &= ~deasserted;
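Editor's note: the asserted/deasserted attention handlers above drop the cached bp->aeu_mask and instead read-modify-write the AEU mask register while holding the per-port attention-mask hardware lock, so the MCP and the other function always see a consistent value. A sketch of that locked read-modify-write (a plain variable stands in for the register, and the lock calls are stubs):

#include <stdint.h>

static uint32_t aeu_mask_reg = 0xff;	/* stands in for the AEU mask register */

static void hw_lock(void)   { /* acquire the attention-mask HW lock */ }
static void hw_unlock(void) { /* release it */ }

static void mask_asserted_bits(uint32_t asserted)
{
	uint32_t aeu_mask;

	hw_lock();
	aeu_mask = aeu_mask_reg;		/* REG_RD(aeu_addr) */
	aeu_mask &= ~(asserted & 0xff);
	aeu_mask_reg = aeu_mask;		/* REG_WR(aeu_addr) */
	hw_unlock();
}

int main(void)
{
	mask_asserted_bits(0x01);
	return (int)aeu_mask_reg;		/* 0xfe */
}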
@@ -2800,7 +2828,7 @@ static void bnx2x_sp_task(struct work_struct *work)
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
return;
}
@@ -2808,7 +2836,7 @@ static void bnx2x_sp_task(struct work_struct *work)
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
- DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
/* HW attentions */
if (status & 0x1)
@@ -2838,7 +2866,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
return IRQ_HANDLED;
}
@@ -2876,11 +2904,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
/* underflow */ \
d_hi = m_hi - s_hi; \
if (d_hi > 0) { \
- /* we can 'loan' 1 */ \
+ /* we can 'loan' 1 */ \
d_hi--; \
d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
} else { \
- /* m_hi <= s_hi */ \
+ /* m_hi <= s_hi */ \
d_hi = 0; \
d_lo = 0; \
} \
@@ -2890,7 +2918,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
d_hi = 0; \
d_lo = 0; \
} else { \
- /* m_hi >= s_hi */ \
+ /* m_hi >= s_hi */ \
d_hi = m_hi - s_hi; \
d_lo = m_lo - s_lo; \
} \
@@ -2963,37 +2991,6 @@ static inline long bnx2x_hilo(u32 *hiref)
* Init service functions
*/
-static void bnx2x_storm_stats_init(struct bnx2x *bp)
-{
- int func = BP_FUNC(bp);
-
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-}
-
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
if (!bp->stats_pending) {
@@ -3032,6 +3029,8 @@ static void bnx2x_stats_init(struct bnx2x *bp)
memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
bp->port.old_nig_stats.brb_discard =
REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+ bp->port.old_nig_stats.brb_truncate =
+ REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
&(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3100,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
might_sleep();
while (*stats_comp != DMAE_COMP_VAL) {
- msleep(1);
if (!cnt) {
BNX2X_ERR("timeout waiting for stats finished\n");
break;
}
cnt--;
+ msleep(1);
}
return 1;
}
@@ -3451,8 +3450,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
- UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
- UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3534,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
new->brb_discard - old->brb_discard);
+ ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+ new->brb_truncate - old->brb_truncate);
UPDATE_STAT64_NIG(egress_mac_pkt0,
etherstatspkts1024octetsto1522octets);
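The new brb_truncate accounting above goes through ADD_EXTEND_64(), which folds the 32-bit hardware delta (new - old) into a 64-bit hi/lo counter so the software statistic never wraps. A plausible stand-alone sketch of that carry logic (hypothetical helper, not the driver's own macro):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit delta into a 64-bit counter kept as hi/lo halves:
 * if the low word wraps, carry one into the high word. */
static void add_extend_64(uint32_t *s_hi, uint32_t *s_lo, uint32_t delta)
{
	*s_lo += delta;
	if (*s_lo < delta)
		(*s_hi)++;
}

int main(void)
{
	uint32_t hi = 0, lo = 0xfffffff0;

	add_extend_64(&hi, &lo, 0x20);	/* wraps the low word */
	printf("%08x%08x\n", hi, lo);	/* prints 0000000100000010 */
	return 0;
}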
@@ -3713,8 +3713,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->rx_length_errors =
estats->rx_stat_etherstatsundersizepkts_lo +
estats->jabber_packets_received;
- nstats->rx_over_errors = estats->brb_drop_lo +
- estats->brb_truncate_discard;
+ nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3782,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bp->fp->rx_comp_cons),
le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
- netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
+ netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
estats->driver_xoff, estats->brb_drop_lo);
printk(KERN_DEBUG "tstats: checksum_discard %u "
"packets_too_big_discard %u no_buff_discard %u "
@@ -3994,14 +3993,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
- sizeof(struct ustorm_def_status_block)/4);
+ sizeof(struct ustorm_status_block)/4);
bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
- sizeof(struct cstorm_def_status_block)/4);
+ sizeof(struct cstorm_status_block)/4);
}
-static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
- struct host_status_block *sb, dma_addr_t mapping)
+static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+ dma_addr_t mapping, int sb_id)
{
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
@@ -4077,7 +4076,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
atten_status_block);
def_sb->atten_status_block.status_block_id = sb_id;
- bp->def_att_idx = 0;
bp->attn_state = 0;
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4092,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
reg_offset + 0xc + 0x10*index);
}
- bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
- MISC_REG_AEU_MASK_ATTN_FUNC_0));
-
reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
HC_REG_ATTN_MSG0_ADDR_L);
@@ -4114,17 +4109,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
u_def_status_block);
def_sb->u_def_status_block.status_block_id = sb_id;
- bp->def_u_idx = 0;
-
REG_WR(bp, BAR_USTRORM_INTMEM +
USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
REG_WR(bp, BAR_USTRORM_INTMEM +
((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
U64_HI(section));
- REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
+ REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
- REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
- BNX2X_BTR);
for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4126,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
c_def_status_block);
def_sb->c_def_status_block.status_block_id = sb_id;
- bp->def_c_idx = 0;
-
REG_WR(bp, BAR_CSTRORM_INTMEM +
CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
REG_WR(bp, BAR_CSTRORM_INTMEM +
((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
U64_HI(section));
- REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
- BNX2X_BTR);
for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4143,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
t_def_status_block);
def_sb->t_def_status_block.status_block_id = sb_id;
- bp->def_t_idx = 0;
-
REG_WR(bp, BAR_TSTRORM_INTMEM +
TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
REG_WR(bp, BAR_TSTRORM_INTMEM +
((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
U64_HI(section));
- REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
- BNX2X_BTR);
for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4160,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
x_def_status_block);
def_sb->x_def_status_block.status_block_id = sb_id;
- bp->def_x_idx = 0;
-
REG_WR(bp, BAR_XSTRORM_INTMEM +
XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
REG_WR(bp, BAR_XSTRORM_INTMEM +
((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
U64_HI(section));
- REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
- BNX2X_BTR);
for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
REG_WR16(bp, BAR_XSTRORM_INTMEM +
XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
bp->stats_pending = 0;
+ bp->set_mac_pending = 0;
bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
@@ -4209,21 +4189,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
/* HC_INDEX_U_ETH_RX_CQ_CONS */
REG_WR8(bp, BAR_USTRORM_INTMEM +
USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
- HC_INDEX_U_ETH_RX_CQ_CONS),
+ U_SB_ETH_RX_CQ_INDEX),
bp->rx_ticks/12);
REG_WR16(bp, BAR_USTRORM_INTMEM +
USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
- HC_INDEX_U_ETH_RX_CQ_CONS),
+ U_SB_ETH_RX_CQ_INDEX),
+ bp->rx_ticks ? 0 : 1);
+ REG_WR16(bp, BAR_USTRORM_INTMEM +
+ USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
+ U_SB_ETH_RX_BD_INDEX),
bp->rx_ticks ? 0 : 1);
/* HC_INDEX_C_ETH_TX_CQ_CONS */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
- HC_INDEX_C_ETH_TX_CQ_CONS),
+ C_SB_ETH_TX_CQ_INDEX),
bp->tx_ticks/12);
REG_WR16(bp, BAR_CSTRORM_INTMEM +
CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
- HC_INDEX_C_ETH_TX_CQ_CONS),
+ C_SB_ETH_TX_CQ_INDEX),
bp->tx_ticks ? 0 : 1);
}
}
@@ -4256,7 +4240,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
- u16 ring_prod, cqe_ring_prod = 0;
+ int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+ ETH_MAX_AGGREGATION_QUEUES_E1H;
+ u16 ring_prod, cqe_ring_prod;
int i, j;
bp->rx_buf_use_size = bp->dev->mtu;
@@ -4270,9 +4256,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
bp->dev->mtu + ETH_OVREHEAD);
for_each_queue(bp, j) {
- for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
- struct bnx2x_fastpath *fp = &bp->fp[j];
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+ for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
netdev_alloc_skb(bp->dev, bp->rx_buf_size);
if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4338,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
BNX2X_ERR("disabling TPA for queue[%d]\n", j);
/* Cleanup already allocated elements */
bnx2x_free_rx_sge_range(bp, fp, ring_prod);
- bnx2x_free_tpa_pool(bp, fp,
- ETH_MAX_AGGREGATION_QUEUES_E1H);
+ bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
fp->disable_tpa = 1;
ring_prod = 0;
break;
@@ -4363,13 +4348,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->rx_sge_prod = ring_prod;
/* Allocate BDs and initialize BD ring */
- fp->rx_comp_cons = fp->rx_alloc_failed = 0;
+ fp->rx_comp_cons = 0;
cqe_ring_prod = ring_prod = 0;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
BNX2X_ERR("was only able to allocate "
"%d rx skbs\n", i);
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
break;
}
ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4497,7 +4482,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
}
context->cstorm_st_context.sb_index_number =
- HC_INDEX_C_ETH_TX_CQ_CONS;
+ C_SB_ETH_TX_CQ_INDEX;
context->cstorm_st_context.status_block_id = sb_id;
context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4520,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
int i;
tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
- tstorm_client.statistics_counter_id = 0;
+ tstorm_client.statistics_counter_id = BP_CL_ID(bp);
tstorm_client.config_flags =
TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
@@ -4579,7 +4564,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
int func = BP_FUNC(bp);
int i;
- DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
+ DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
switch (mode) {
case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4602,35 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
bnx2x_set_client_config(bp);
}
-static void bnx2x_init_internal(struct bnx2x *bp)
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+ int i;
+
+ /* Zero this manually as its initialization is
+ currently missing in the initTool */
+ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_AGG_DATA_OFFSET + i * 4, 0);
+}
+
+static void bnx2x_init_internal_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+}
+
+static void bnx2x_init_internal_func(struct bnx2x *bp)
{
struct tstorm_eth_function_common_config tstorm_config = {0};
struct stats_indication_flags stats_flags = {0};
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
int i;
+ u16 max_agg_size;
if (is_multi(bp)) {
tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4643,53 @@ static void bnx2x_init_internal(struct bnx2x *bp)
TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
(*(u32 *)&tstorm_config));
-/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
- (*(u32 *)&tstorm_config)); */
-
bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
bnx2x_set_storm_rx_mode(bp);
+ /* reset xstorm per client statistics */
+ for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
+ i*4, 0);
+ }
+ /* reset tstorm per client statistics */
+ for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
+ REG_WR(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
+ i*4, 0);
+ }
+
+ /* Init statistics related context */
stats_flags.collect_eth = 1;
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
((u32 *)&stats_flags)[1]);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
+ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
+ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
((u32 *)&stats_flags)[1]);
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
+ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
+ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
((u32 *)&stats_flags)[1]);
-/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
- ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+ U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+ U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+
+ REG_WR(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+ U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+ REG_WR(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+ U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
if (CHIP_IS_E1H(bp)) {
REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4705,12 @@ static void bnx2x_init_internal(struct bnx2x *bp)
bp->e1hov);
}
- /* Zero this manualy as its initialization is
- currently missing in the initTool */
- for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_AGG_DATA_OFFSET + 4*i, 0);
-
+ /* Init CQ ring mapping and aggregation size */
+ max_agg_size = min((u32)(bp->rx_buf_use_size +
+ 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
+ (u32)0xffff);
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- u16 max_agg_size;
REG_WR(bp, BAR_USTRORM_INTMEM +
USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4719,34 @@ static void bnx2x_init_internal(struct bnx2x *bp)
USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
U64_HI(fp->rx_comp_mapping));
- max_agg_size = min((u32)(bp->rx_buf_use_size +
- 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
- (u32)0xffff);
REG_WR16(bp, BAR_USTRORM_INTMEM +
USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
max_agg_size);
}
}
-static void bnx2x_nic_init(struct bnx2x *bp)
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ bnx2x_init_internal_common(bp);
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ bnx2x_init_internal_port(bp);
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ bnx2x_init_internal_func(bp);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ break;
+ }
+}
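The new bnx2x_init_internal() dispatcher above relies on deliberate switch fall-through: a COMMON load must also perform the per-port and per-function init, and a PORT load must also perform the per-function init. A tiny stand-alone illustration of that cascade (enum values and init stubs are made up for the example):

#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_common(void)   { puts("common init"); }
static void init_port(void)     { puts("port init"); }
static void init_function(void) { puts("function init"); }

static void init_internal(enum load_code code)
{
	switch (code) {
	case LOAD_COMMON:
		init_common();
		/* no break: fall through to port init */
	case LOAD_PORT:
		init_port();
		/* no break: fall through to function init */
	case LOAD_FUNCTION:
		init_function();
		break;
	default:
		fprintf(stderr, "unknown load code %d\n", code);
		break;
	}
}

int main(void)
{
	init_internal(LOAD_PORT);	/* prints "port init" then "function init" */
	return 0;
}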
+
+static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
int i;
@@ -4717,19 +4761,20 @@ static void bnx2x_nic_init(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
- bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
- fp->status_blk_mapping);
+ bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
+ FP_SB_ID(fp));
+ bnx2x_update_fpsb_idx(fp);
}
- bnx2x_init_def_sb(bp, bp->def_status_blk,
- bp->def_status_blk_mapping, DEF_SB_ID);
+ bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
+ DEF_SB_ID);
+ bnx2x_update_dsb_idx(bp);
bnx2x_update_coalesce(bp);
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_ring(bp);
bnx2x_init_sp_ring(bp);
bnx2x_init_context(bp);
- bnx2x_init_internal(bp);
- bnx2x_storm_stats_init(bp);
+ bnx2x_init_internal(bp, load_code);
bnx2x_init_ind_table(bp);
bnx2x_int_enable(bp);
}
@@ -4878,7 +4923,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
REG_WR(bp, CFC_REG_DEBUG0, 0x1);
- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
/* Write 0 to parser credits for CFC search request */
REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4978,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
REG_WR(bp, CFC_REG_DEBUG0, 0x1);
- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
/* Write 0 to parser credits for CFC search request */
REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5045,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
REG_WR(bp, CFC_REG_DEBUG0, 0x0);
- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
DP(NETIF_MSG_HW, "done\n");
@@ -5089,11 +5134,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif
-#ifndef BCM_ISCSI
- /* set NIC mode */
- REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
-
REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5203,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
}
bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
if (CHIP_IS_E1H(bp))
REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
@@ -5333,6 +5375,13 @@ static int bnx2x_init_common(struct bnx2x *bp)
((u32 *)&tmp)[1]);
}
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_common_init_phy(bp, bp->common.shmem_base);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
return 0;
}
@@ -5638,18 +5687,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
int func = BP_FUNC(bp);
u32 seq = ++bp->fw_seq;
u32 rc = 0;
+ u32 cnt = 1;
+ u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
- /* let the FW do it's magic ... */
- msleep(100); /* TBD */
+ do {
+ /* let the FW do its magic ... */
+ msleep(delay);
- if (CHIP_REV_IS_SLOW(bp))
- msleep(900);
+ rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
- rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
- DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
+ /* Give the FW up to 2 seconds (200*10ms) */
+ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
+
+ DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt*delay, rc, seq);
/* is this a reply to our command? */
if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
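The reworked bnx2x_fw_command() above replaces a fixed 100ms sleep with a bounded poll: it writes the command plus a sequence number to the driver mailbox, then re-reads the firmware mailbox until the reply echoes that sequence or roughly 200 polls (about 2 seconds at 10ms each) have elapsed. A stand-alone sketch of the pattern, with a fake mailbox and hypothetical read_fw_mb()/write_drv_mb() stand-ins for SHMEM_RD()/SHMEM_WR():

#include <stdint.h>
#include <stdio.h>

#define FW_MSG_SEQ_NUMBER_MASK	0x0000ffff

static uint32_t fw_mb;				/* fake firmware mailbox */

static void write_drv_mb(uint32_t val)	{ fw_mb = val; }	/* fake FW echoes at once */
static uint32_t read_fw_mb(void)	{ return fw_mb; }

static int fw_command(uint32_t command, uint32_t seq)
{
	uint32_t rc;
	int cnt = 1;

	write_drv_mb(command | seq);
	do {
		/* the real driver sleeps 10ms here (100ms on emulation) */
		rc = read_fw_mb();
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	if (seq != (rc & FW_MSG_SEQ_NUMBER_MASK))
		return -1;				/* timed out */
	return rc & ~FW_MSG_SEQ_NUMBER_MASK;		/* reply code */
}

int main(void)
{
	printf("reply 0x%x\n", (unsigned)fw_command(0x10000, 3));	/* reply 0x10000 */
	return 0;
}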
@@ -5713,6 +5767,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
NUM_RCQ_BD);
/* SGE ring */
+ BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
bnx2x_fp(bp, i, rx_sge_mapping),
BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5890,7 +5945,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
dev_kfree_skb(skb);
}
if (!fp->disable_tpa)
- bnx2x_free_tpa_pool(bp, fp,
+ bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
+ ETH_MAX_AGGREGATION_QUEUES_E1 :
ETH_MAX_AGGREGATION_QUEUES_E1H);
}
}
@@ -5976,8 +6032,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
bnx2x_msix_fp_int, 0,
bp->dev->name, &bp->fp[i]);
if (rc) {
- BNX2X_ERR("request fp #%d irq failed rc %d\n",
- i + offset, rc);
+ BNX2X_ERR("request fp #%d irq failed rc -%d\n",
+ i + offset, -rc);
bnx2x_free_msix_irqs(bp);
return -EBUSY;
}
@@ -6004,7 +6060,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
* Init service functions
*/
-static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
+static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
int port = BP_PORT(bp);
@@ -6026,11 +6082,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
config->config_table[0].cam_entry.lsb_mac_addr =
swab16(*(u16 *)&bp->dev->dev_addr[4]);
config->config_table[0].cam_entry.flags = cpu_to_le16(port);
- config->config_table[0].target_table_entry.flags = 0;
+ if (set)
+ config->config_table[0].target_table_entry.flags = 0;
+ else
+ CAM_INVALIDATE(config->config_table[0]);
config->config_table[0].target_table_entry.client_id = 0;
config->config_table[0].target_table_entry.vlan_id = 0;
- DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
+ DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
+ (set ? "setting" : "clearing"),
config->config_table[0].cam_entry.msb_mac_addr,
config->config_table[0].cam_entry.middle_mac_addr,
config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6100,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
config->config_table[1].cam_entry.flags = cpu_to_le16(port);
- config->config_table[1].target_table_entry.flags =
+ if (set)
+ config->config_table[1].target_table_entry.flags =
TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
+ else
+ CAM_INVALIDATE(config->config_table[1]);
config->config_table[1].target_table_entry.client_id = 0;
config->config_table[1].target_table_entry.vlan_id = 0;
@@ -6050,12 +6113,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
-static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
+static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
struct mac_configuration_cmd_e1h *config =
(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
- if (bp->state != BNX2X_STATE_OPEN) {
+ if (set && (bp->state != BNX2X_STATE_OPEN)) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return;
}
@@ -6079,9 +6142,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
config->config_table[0].client_id = BP_L_ID(bp);
config->config_table[0].vlan_id = 0;
config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
- config->config_table[0].flags = BP_PORT(bp);
+ if (set)
+ config->config_table[0].flags = BP_PORT(bp);
+ else
+ config->config_table[0].flags =
+ MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
- DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
+ DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
+ (set ? "setting" : "clearing"),
config->config_table[0].msb_mac_addr,
config->config_table[0].middle_mac_addr,
config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
@@ -6106,13 +6174,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
bnx2x_rx_int(bp->fp, 10);
/* if index is different from 0
* the reply for some commands will
- * be on the none default queue
+ * be on the non default queue
*/
if (idx)
bnx2x_rx_int(&bp->fp[idx], 10);
}
- mb(); /* state is changed by bnx2x_sp_event() */
+ mb(); /* state is changed by bnx2x_sp_event() */
if (*state_p == state)
return 0;
@@ -6167,7 +6235,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
u32 load_code;
int i, rc;
-
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EPERM;
@@ -6183,22 +6250,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (!BP_NOMCP(bp)) {
load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
if (!load_code) {
- BNX2X_ERR("MCP response failure, unloading\n");
+ BNX2X_ERR("MCP response failure, aborting\n");
return -EBUSY;
}
if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
return -EBUSY; /* other port in diagnostic mode */
} else {
+ int port = BP_PORT(bp);
+
DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
load_count[0]++;
- load_count[1 + BP_PORT(bp)]++;
+ load_count[1 + port]++;
DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
if (load_count[0] == 1)
load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[1 + BP_PORT(bp)] == 1)
+ else if (load_count[1 + port] == 1)
load_code = FW_MSG_CODE_DRV_LOAD_PORT;
else
load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6247,9 +6316,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_fp(bp, i, disable_tpa) =
((bp->flags & TPA_ENABLE_FLAG) == 0);
- /* Disable interrupt handling until HW is initialized */
- atomic_set(&bp->intr_sem, 1);
-
if (bp->flags & USING_MSIX_FLAG) {
rc = bnx2x_req_msix_irqs(bp);
if (rc) {
@@ -6276,17 +6342,14 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
goto load_error;
}
- /* Enable interrupt handling */
- atomic_set(&bp->intr_sem, 0);
-
/* Setup NIC internals and enable interrupts */
- bnx2x_nic_init(bp);
+ bnx2x_nic_init(bp, load_code);
/* Send LOAD_DONE command to MCP */
if (!BP_NOMCP(bp)) {
load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
if (!load_code) {
- BNX2X_ERR("MCP response failure, unloading\n");
+ BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
goto load_int_disable;
}
@@ -6301,11 +6364,12 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
for_each_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
+ /* Enable interrupt handling */
+ atomic_set(&bp->intr_sem, 0);
+
rc = bnx2x_setup_leading(bp);
if (rc) {
-#ifdef BNX2X_STOP_ON_ERROR
- bp->panic = 1;
-#endif
+ BNX2X_ERR("Setup leading failed!\n");
goto load_stop_netif;
}
@@ -6323,9 +6387,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
if (CHIP_IS_E1(bp))
- bnx2x_set_mac_addr_e1(bp);
+ bnx2x_set_mac_addr_e1(bp, 1);
else
- bnx2x_set_mac_addr_e1h(bp);
+ bnx2x_set_mac_addr_e1h(bp, 1);
if (bp->port.pmf)
bnx2x_initial_phy_init(bp);
@@ -6339,7 +6403,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
case LOAD_OPEN:
- /* IRQ is only requested from bnx2x_open */
netif_start_queue(bp->dev);
bnx2x_set_rx_mode(bp->dev);
if (bp->flags & USING_MSIX_FLAG)
@@ -6378,8 +6441,7 @@ load_int_disable:
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
for_each_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i,
- RX_SGE_CNT*NUM_RX_SGE_PAGES);
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error:
bnx2x_free_mem(bp);
@@ -6411,7 +6473,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
return rc;
}
-static void bnx2x_stop_leading(struct bnx2x *bp)
+static int bnx2x_stop_leading(struct bnx2x *bp)
{
u16 dsb_sp_prod_idx;
/* if the other port is handling traffic,
@@ -6429,7 +6491,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
&(bp->fp[0].state), 1);
if (rc) /* timeout */
- return;
+ return rc;
dsb_sp_prod_idx = *bp->dsb_sp_prod;
@@ -6441,20 +6503,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
so there is not much to do if this times out
*/
while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
- msleep(1);
if (!cnt) {
DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
"dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
*bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
+#else
+ rc = -EBUSY;
#endif
break;
}
cnt--;
+ msleep(1);
}
bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
+
+ return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
@@ -6496,7 +6562,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
if (val)
DP(NETIF_MSG_IFDOWN,
- "BRB1 is not empty %d blooks are occupied\n", val);
+ "BRB1 is not empty %d blocks are occupied\n", val);
/* TODO: Close Doorbell port? */
}
@@ -6536,11 +6602,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
}
}
-/* msut be called with rtnl_lock */
+/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
+ int port = BP_PORT(bp);
u32 reset_code = 0;
- int i, cnt;
+ int i, cnt, rc;
bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
@@ -6557,22 +6624,17 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
(DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- /* Wait until all fast path tasks complete */
+ /* Wait until tx fast path tasks complete */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
-#ifdef BNX2X_STOP_ON_ERROR
-#ifdef __powerpc64__
- DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
- DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
-#endif
- fp->tpa_queue_used);
-#endif
cnt = 1000;
smp_rmb();
- while (bnx2x_has_work(fp)) {
- msleep(1);
+ while (BNX2X_HAS_TX_WORK(fp)) {
+
+ if (!netif_running(bp->dev))
+ bnx2x_tx_int(fp, 1000);
+
if (!cnt) {
BNX2X_ERR("timeout waiting for queue[%d]\n",
i);
@@ -6584,14 +6646,13 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
#endif
}
cnt--;
+ msleep(1);
smp_rmb();
}
}
- /* Wait until all slow path tasks complete */
- cnt = 1000;
- while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
- msleep(1);
+ /* Give HW time to discard old tx messages */
+ msleep(1);
for_each_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
@@ -6601,52 +6662,79 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Release IRQs */
bnx2x_free_irq(bp);
- if (bp->flags & NO_WOL_FLAG)
+ if (unload_mode == UNLOAD_NORMAL)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ else if (bp->flags & NO_WOL_FLAG) {
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+ if (CHIP_IS_E1H(bp))
+ REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
- else if (bp->wol) {
- u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ } else if (bp->wol) {
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
u32 val;
-
/* The mac address is written to entries 1-4 to
preserve entry 0 which is used by the PMF */
+ u8 entry = (BP_E1HVN(bp) + 1)*8;
+
val = (mac_addr[0] << 8) | mac_addr[1];
- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
- val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
} else
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+ if (CHIP_IS_E1(bp)) {
+ struct mac_configuration_cmd *config =
+ bnx2x_sp(bp, mcast_config);
+
+ bnx2x_set_mac_addr_e1(bp, 0);
+
+ for (i = 0; i < config->hdr.length_6b; i++)
+ CAM_INVALIDATE(config->config_table[i]);
+
+ config->hdr.length_6b = i;
+ if (CHIP_REV_IS_SLOW(bp))
+ config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
+ else
+ config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
+ config->hdr.client_id = BP_CL_ID(bp);
+ config->hdr.reserved1 = 0;
+
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+ U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+ U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+
+ } else { /* E1H */
+ bnx2x_set_mac_addr_e1h(bp, 0);
+
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+ }
+
+ if (CHIP_IS_E1H(bp))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
/* Close multi and leading connections
Completions for ramrods are collected in a synchronous way */
for_each_nondefault_queue(bp, i)
if (bnx2x_stop_multi(bp, i))
goto unload_error;
- if (CHIP_IS_E1H(bp))
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
-
- bnx2x_stop_leading(bp);
-#ifdef BNX2X_STOP_ON_ERROR
- /* If ramrod completion timed out - break here! */
- if (bp->panic) {
+ rc = bnx2x_stop_leading(bp);
+ if (rc) {
BNX2X_ERR("Stop leading failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
return -EBUSY;
- }
+#else
+ goto unload_error;
#endif
-
- if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
- (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
- DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
- "state 0x%x fp[0].state 0x%x\n",
- bp->state, bp->fp[0].state);
}
unload_error:
@@ -6656,12 +6744,12 @@ unload_error:
DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
load_count[0]--;
- load_count[1 + BP_PORT(bp)]--;
+ load_count[1 + port]--;
DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
if (load_count[0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[1 + BP_PORT(bp)] == 0)
+ else if (load_count[1 + port] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
else
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6769,7 @@ unload_error:
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
for_each_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i,
- RX_SGE_CNT*NUM_RX_SGE_PAGES);
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
bnx2x_free_mem(bp);
bp->state = BNX2X_STATE_CLOSED;
@@ -6733,56 +6820,93 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
/* Check if it is the UNDI driver
* UNDI driver initializes CID offset for normal bell to 0x7
*/
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (val == 0x7) {
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
- /* save our func and fw_seq */
+ /* save our func */
int func = BP_FUNC(bp);
- u16 fw_seq = bp->fw_seq;
+ u32 swap_en;
+ u32 swap_val;
BNX2X_DEV_INFO("UNDI is active! reset device\n");
/* try unload UNDI on port 0 */
bp->func = 0;
- bp->fw_seq = (SHMEM_RD(bp,
- func_mb[bp->func].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
-
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
reset_code = bnx2x_fw_command(bp, reset_code);
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
/* if UNDI is loaded on the other port */
if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ /* send "DONE" for previous unload */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+ /* unload UNDI on port 1 */
bp->func = 1;
- bp->fw_seq = (SHMEM_RD(bp,
- func_mb[bp->func].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
-
- bnx2x_fw_command(bp,
- DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
- bnx2x_fw_command(bp,
- DRV_MSG_CODE_UNLOAD_DONE);
-
- /* restore our func and fw_seq */
- bp->func = func;
- bp->fw_seq = fw_seq;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ bnx2x_fw_command(bp, reset_code);
}
+ REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
+ HC_REG_CONFIG_0), 0x1000);
+
+ /* close input traffic and wait for it */
+ /* Do not rcv packets to BRB */
+ REG_WR(bp,
+ (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+ /* Do not direct rcv packets that are not for MCP to
+ * the BRB */
+ REG_WR(bp,
+ (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+ /* clear AEU */
+ REG_WR(bp,
+ (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+ msleep(10);
+
+ /* save NIG port swap info */
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
/* reset device */
REG_WR(bp,
GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
- 0xd3ffff7f);
+ 0xd3ffffff);
REG_WR(bp,
GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
0x1403);
+ /* take the NIG out of reset and restore swap values */
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ MISC_REGISTERS_RESET_REG_1_RST_NIG);
+ REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+ REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+ /* send unload done to the MCP */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+ /* restore our func and fw_seq */
+ bp->func = func;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
}
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
}
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
u32 val, val2, val3, val4, id;
+ u16 pmc;
/* Get the chip revision id and number. */
/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6964,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
BNX2X_ERR("This driver needs bc_ver %X but found %X,"
" please upgrade BC\n", BNX2X_BC_VER, val);
}
- BNX2X_DEV_INFO("%sWoL Capable\n",
- (bp->flags & NO_WOL_FLAG)? "Not " : "");
+
+ if (BP_E1HVN(bp) == 0) {
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+ } else {
+ /* no WOL capability for E1HVN != 0 */
+ bp->flags |= NO_WOL_FLAG;
+ }
+ BNX2X_DEV_INFO("%sWoL capable\n",
+ (bp->flags & NO_WOL_FLAG) ? "Not " : "");
val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
@@ -7274,9 +7406,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_config =
SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
- val =
- (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
- FUNC_MF_CFG_E1HOV_TAG_MASK);
+ val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK);
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
bp->e1hov = val;
@@ -7324,7 +7455,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
if (BP_NOMCP(bp)) {
/* only supposed to happen on emulation/FPGA */
- BNX2X_ERR("warning rendom MAC workaround active\n");
+ BNX2X_ERR("warning random MAC workaround active\n");
random_ether_addr(bp->dev->dev_addr);
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
@@ -7337,8 +7468,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
int func = BP_FUNC(bp);
int rc;
- if (nomcp)
- bp->flags |= NO_MCP_FLAG;
+ /* Disable interrupt handling until HW is initialized */
+ atomic_set(&bp->intr_sem, 1);
mutex_init(&bp->port.phy_mutex);
@@ -7377,8 +7508,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->tx_ticks = 50;
bp->rx_ticks = 25;
- bp->stats_ticks = 1000000 & 0xffff00;
-
bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
bp->current_interval = (poll ? poll : bp->timer_interval);
@@ -7628,25 +7757,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
- char phy_fw_ver[PHY_FW_VER_LEN];
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
strcpy(info->driver, DRV_MODULE_NAME);
strcpy(info->version, DRV_MODULE_VERSION);
phy_fw_ver[0] = '\0';
if (bp->port.pmf) {
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
bnx2x_get_ext_phy_fw_version(&bp->link_params,
(bp->state != BNX2X_STATE_CLOSED),
phy_fw_ver, PHY_FW_VER_LEN);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
}
- snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
- BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION,
- BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
- ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
+ snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
strcpy(info->bus_info, pci_name(bp->pdev));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
@@ -8097,7 +8226,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
if (eeprom->magic == 0x00504859)
if (bp->port.pmf) {
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_flash_download(bp, BP_PORT(bp),
bp->link_params.ext_phy_config,
(bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8238,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
rc |= bnx2x_phy_init(&bp->link_params,
&bp->link_vars);
}
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
} else /* Only the PMF can access the PHY */
return -EINVAL;
@@ -8128,7 +8257,6 @@ static int bnx2x_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs = bp->rx_ticks;
coal->tx_coalesce_usecs = bp->tx_ticks;
- coal->stats_block_coalesce_usecs = bp->stats_ticks;
return 0;
}
@@ -8146,44 +8274,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
if (bp->tx_ticks > 0x3000)
bp->tx_ticks = 0x3000;
- bp->stats_ticks = coal->stats_block_coalesce_usecs;
- if (bp->stats_ticks > 0xffff00)
- bp->stats_ticks = 0xffff00;
- bp->stats_ticks &= 0xffff00;
-
if (netif_running(dev))
bnx2x_update_coalesce(bp);
return 0;
}
-static int bnx2x_set_flags(struct net_device *dev, u32 data)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int changed = 0;
- int rc = 0;
-
- if (data & ETH_FLAG_LRO) {
- if (!(dev->features & NETIF_F_LRO)) {
- dev->features |= NETIF_F_LRO;
- bp->flags |= TPA_ENABLE_FLAG;
- changed = 1;
- }
-
- } else if (dev->features & NETIF_F_LRO) {
- dev->features &= ~NETIF_F_LRO;
- bp->flags &= ~TPA_ENABLE_FLAG;
- changed = 1;
- }
-
- if (changed && netif_running(dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
-
- return rc;
-}
-
static void bnx2x_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
@@ -8266,7 +8362,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
if (epause->autoneg) {
if (!(bp->port.supported & SUPPORTED_Autoneg)) {
- DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+ DP(NETIF_MSG_LINK, "autoneg not supported\n");
return -EINVAL;
}
@@ -8285,6 +8381,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
+static int bnx2x_set_flags(struct net_device *dev, u32 data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int changed = 0;
+ int rc = 0;
+
+ /* TPA requires Rx CSUM offloading */
+ if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
+ if (!(dev->features & NETIF_F_LRO)) {
+ dev->features |= NETIF_F_LRO;
+ bp->flags |= TPA_ENABLE_FLAG;
+ changed = 1;
+ }
+
+ } else if (dev->features & NETIF_F_LRO) {
+ dev->features &= ~NETIF_F_LRO;
+ bp->flags &= ~TPA_ENABLE_FLAG;
+ changed = 1;
+ }
+
+ if (changed && netif_running(dev)) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
+
+ return rc;
+}
+
static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8419,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev)
static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
bp->rx_csum = data;
- return 0;
+
+ /* Disable TPA, when Rx CSUM is disabled. Otherwise all
+ TPA'ed packets will be discarded due to wrong TCP CSUM */
+ if (!data) {
+ u32 flags = ethtool_op_get_flags(dev);
+
+ rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
+ }
+
+ return rc;
}
static int bnx2x_set_tso(struct net_device *dev, u32 data)
@@ -8335,6 +8469,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
{
int idx, i, rc = -ENODEV;
u32 wr_val = 0;
+ int port = BP_PORT(bp);
static const struct {
u32 offset0;
u32 offset1;
@@ -8400,7 +8535,6 @@ static int bnx2x_test_registers(struct bnx2x *bp)
for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
u32 offset, mask, save_val, val;
- int port = BP_PORT(bp);
offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
mask = reg_tbl[i].mask;
@@ -8446,16 +8580,17 @@ static int bnx2x_test_memory(struct bnx2x *bp)
static const struct {
char *name;
u32 offset;
- u32 mask;
+ u32 e1_mask;
+ u32 e1h_mask;
} prty_tbl[] = {
- { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
- { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
- { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
- { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
- { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
- { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
-
- { NULL, 0xffffffff, 0 }
+ { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
+ { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
+ { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
+ { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
+ { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
+ { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
+
+ { NULL, 0xffffffff, 0, 0 }
};
if (!netif_running(bp->dev))
@@ -8469,7 +8604,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
/* Check the parity status */
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
- if (val & ~(prty_tbl[i].mask)) {
+ if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
+ (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
@@ -8539,15 +8675,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
if (loopback_mode == BNX2X_MAC_LOOPBACK) {
bp->link_params.loopback_mode = LOOPBACK_BMAC;
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up);
@@ -8771,7 +8907,7 @@ static void bnx2x_self_test(struct net_device *dev,
if (!netif_running(dev))
return;
- /* offline tests are not suppoerted in MF mode */
+ /* offline tests are not supported in MF mode */
if (IS_E1HMF(bp))
etest->flags &= ~ETH_TEST_FL_OFFLINE;
@@ -8827,76 +8963,99 @@ static const struct {
long offset;
int size;
u32 flags;
- char string[ETH_GSTRING_LEN];
+#define STATS_FLAGS_PORT 1
+#define STATS_FLAGS_FUNC 2
+ u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" },
- { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" },
- { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
- { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
+/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
+ 8, STATS_FLAGS_FUNC, "rx_bytes" },
+ { STATS_OFFSET32(error_bytes_received_hi),
+ 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
+ { STATS_OFFSET32(total_bytes_transmitted_hi),
+ 8, STATS_FLAGS_FUNC, "tx_bytes" },
+ { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+ 8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, 1, "rx_ucast_packets" },
+ 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
{ STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, 1, "rx_mcast_packets" },
+ 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
{ STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, 1, "rx_bcast_packets" },
+ 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, 1, "tx_packets" },
+ 8, STATS_FLAGS_FUNC, "tx_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
- 8, 0, "tx_mac_errors" },
+ 8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
- 8, 0, "tx_carrier_errors" },
+ 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
- 8, 0, "rx_crc_errors" },
+ 8, STATS_FLAGS_PORT, "rx_crc_errors" },
{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
- 8, 0, "rx_align_errors" },
+ 8, STATS_FLAGS_PORT, "rx_align_errors" },
{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
- 8, 0, "tx_single_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
- 8, 0, "tx_multi_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
- 8, 0, "tx_deferred" },
+ 8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
- 8, 0, "tx_excess_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
- 8, 0, "tx_late_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_late_collisions" },
{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
- 8, 0, "tx_total_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_total_collisions" },
{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
- 8, 0, "rx_fragments" },
-/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
+ 8, STATS_FLAGS_PORT, "rx_fragments" },
+/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+ 8, STATS_FLAGS_PORT, "rx_jabbers" },
{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
- 8, 0, "rx_undersize_packets" },
+ 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
{ STATS_OFFSET32(jabber_packets_received),
- 4, 1, "rx_oversize_packets" },
+ 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
- 8, 0, "tx_64_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
- 8, 0, "tx_65_to_127_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
- 8, 0, "tx_128_to_255_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
- 8, 0, "tx_256_to_511_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
- 8, 0, "tx_512_to_1023_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
- 8, 0, "tx_1024_to_1522_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
- 8, 0, "tx_1523_to_9022_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
- 8, 0, "rx_xon_frames" },
+ 8, STATS_FLAGS_PORT, "rx_xon_frames" },
{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
- 8, 0, "rx_xoff_frames" },
- { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" },
- { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
+ 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
+ { STATS_OFFSET32(tx_stat_outxonsent_hi),
+ 8, STATS_FLAGS_PORT, "tx_xon_frames" },
+ { STATS_OFFSET32(tx_stat_outxoffsent_hi),
+ 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
- 8, 0, "rx_mac_ctrl_frames" },
- { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
- { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" },
- { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
- { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" },
-/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
+ 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+ { STATS_OFFSET32(mac_filter_discard),
+ 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+ { STATS_OFFSET32(no_buff_discard),
+ 4, STATS_FLAGS_FUNC, "rx_discards" },
+ { STATS_OFFSET32(xxoverflow_discard),
+ 4, STATS_FLAGS_PORT, "rx_fw_discards" },
+ { STATS_OFFSET32(brb_drop_hi),
+ 8, STATS_FLAGS_PORT, "brb_discard" },
+ { STATS_OFFSET32(brb_truncate_hi),
+ 8, STATS_FLAGS_PORT, "brb_truncate" },
+/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
+ 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
+ { STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
+/* 42 */{ STATS_OFFSET32(hw_csum_err),
+ 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};
+#define IS_NOT_E1HMF_STAT(bp, i) \
+ (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
+
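The table above now tags every entry as per-port or per-function, and the new IS_NOT_E1HMF_STAT() check skips port-wide counters when the device runs in E1H multi-function mode, since several functions share one port there. A toy stand-alone version of that filtering (the table below is a made-up stand-in for bnx2x_stats_arr):

#include <stdio.h>

#define STATS_FLAGS_PORT	1
#define STATS_FLAGS_FUNC	2

struct stat_desc {
	unsigned int flags;
	const char *string;
};

static const struct stat_desc stats_arr[] = {
	{ STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_FLAGS_PORT, "rx_crc_errors" },
};

static int is_mf = 1;			/* pretend IS_E1HMF(bp) is true */

static int skip_stat(unsigned int i)
{
	return is_mf && (stats_arr[i].flags & STATS_FLAGS_PORT);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(stats_arr) / sizeof(stats_arr[0]); i++)
		if (!skip_stat(i))
			puts(stats_arr[i].string);	/* rx_bytes, tx_packets */
	return 0;
}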
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -8905,7 +9064,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+ if (IS_NOT_E1HMF_STAT(bp, i))
continue;
strcpy(buf + j*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
@@ -8925,7 +9084,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
int i, num_stats = 0;
for (i = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+ if (IS_NOT_E1HMF_STAT(bp, i))
continue;
num_stats++;
}
@@ -8940,7 +9099,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
int i, j;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+ if (IS_NOT_E1HMF_STAT(bp, i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9216,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
PCI_PM_CTRL_PME_STATUS));
if (pmcsr & PCI_PM_CTRL_STATE_MASK)
- /* delay required during transition out of D3hot */
+ /* delay required during transition out of D3hot */
msleep(20);
break;
@@ -9104,17 +9263,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
bnx2x_update_fpsb_idx(fp);
- if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
- (fp->tx_pkt_prod != fp->tx_pkt_cons))
+ if (BNX2X_HAS_TX_WORK(fp))
bnx2x_tx_int(fp, budget);
- if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
+ if (BNX2X_HAS_RX_WORK(fp))
work_done = bnx2x_rx_int(fp, budget);
- rmb(); /* bnx2x_has_work() reads the status block */
+ rmb(); /* BNX2X_HAS_WORK() reads the status block */
/* must not complete if we consumed full budget */
- if ((work_done < budget) && !bnx2x_has_work(fp)) {
+ if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
@@ -9131,7 +9289,7 @@ poll_panic:
/* we split the first BD into headers and data BDs
- * to ease the pain of our fellow micocode engineers
+ * to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
* So far this has only been observed to happen
* in Other Operating Systems(TM)
@@ -9238,7 +9396,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Check if LSO packet needs to be copied:
3 = 1 (for headers BD) + 2 (for PBD and last BD) */
int wnd_size = MAX_FETCH_BD - 3;
- /* Number of widnows to check */
+ /* Number of windows to check */
int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
int wnd_idx = 0;
int frag_idx = 0;
@@ -9340,7 +9498,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
- /* First, check if we need to linearaize the skb
+ /* First, check if we need to linearize the skb
(due to FW restrictions) */
if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
/* Statistics of linearization */
@@ -9349,7 +9507,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
"silently dropping this SKB\n");
dev_kfree_skb_any(skb);
- return 0;
+ return NETDEV_TX_OK;
}
}
@@ -9372,7 +9530,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_bd->general_data = (UNICAST_ADDRESS <<
ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
- tx_bd->general_data |= 1; /* header nbd */
+ /* header nbd */
+ tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
/* remember the first BD of the packet */
tx_buf->first_bd = fp->tx_bd_prod;
@@ -9451,7 +9610,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
+ nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
tx_bd->nbd = cpu_to_le16(nbd);
tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
@@ -9721,9 +9880,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev)) {
if (CHIP_IS_E1(bp))
- bnx2x_set_mac_addr_e1(bp);
+ bnx2x_set_mac_addr_e1(bp, 1);
else
- bnx2x_set_mac_addr_e1h(bp);
+ bnx2x_set_mac_addr_e1h(bp, 1);
}
return 0;
@@ -9734,6 +9893,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
struct bnx2x *bp = netdev_priv(dev);
+ int port = BP_PORT(bp);
int err;
switch (cmd) {
@@ -9749,7 +9909,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
mutex_lock(&bp->port.phy_mutex);
- err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+ err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
DEFAULT_PHY_DEV_ADDR,
(data->reg_num & 0x1f), &mii_regval);
data->val_out = mii_regval;
@@ -9765,7 +9925,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
mutex_lock(&bp->port.phy_mutex);
- err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+ err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
DEFAULT_PHY_DEV_ADDR,
(data->reg_num & 0x1f), data->val_in);
mutex_unlock(&bp->port.phy_mutex);
@@ -10141,7 +10301,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev);
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
@@ -10174,7 +10334,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
bnx2x_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
rtnl_unlock();
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 15c9a9946724..a67b0c358ae4 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * The registers description starts with the regsister Access type followed
+ * The registers description starts with the register Access type followed
* by size in bits. For example [RW 32]. The access types are:
* R - Read only
* RC - Clear on read
@@ -49,7 +49,7 @@
/* [RW 10] Write client 0: Assert pause threshold. */
#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
-/* [R 24] The number of full blocks occpied by port. */
+/* [R 24] The number of full blocks occupied by port. */
#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
/* [RW 1] Reset the design by software. */
#define BRB1_REG_SOFT_RESET 0x600dc
@@ -740,6 +740,7 @@
#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
#define HC_REG_ATTN_NUM_P0 0x108038
#define HC_REG_ATTN_NUM_P1 0x10803c
+#define HC_REG_COMMAND_REG 0x108180
#define HC_REG_CONFIG_0 0x108000
#define HC_REG_CONFIG_1 0x108004
#define HC_REG_FUNC_NUM_P0 0x1080ac
@@ -1372,6 +1373,23 @@
be asserted). */
#define MISC_REG_DRIVER_CONTROL_16 0xa5f0
#define MISC_REG_DRIVER_CONTROL_16_SIZE 2
+/* [RW 32] The following driver registers (1...16) represent 16 drivers and
+ 32 clients. Each client can be controlled by one driver only. A one in a
+ bit means that this driver controls the corresponding client (e.g. bit 5
+ set means this driver controls client number 5). addr1 = set; addr0 =
+ clear; a read from either address returns the same result = status. A
+ write to address 1 requests control of all the clients whose bit (in the
+ write command) is set: if a client is free (the corresponding bit in all
+ the other drivers is clear) a one is written to this driver's register;
+ if the client isn't free the bit remains zero. If the corresponding bit
+ is already set (the driver requests control of a client it already
+ controls) the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt is asserted.
+ A write to address 0 requests release of all the clients whose bit (in
+ the write command) is set: if the corresponding bit is clear (the driver
+ requests release of a client it doesn't control) the
+ ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt is asserted. */
+#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
only. */
#define MISC_REG_E1HMF_MODE 0xa5f8
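
A sketch of how a driver might claim a client through one of these set/clear register pairs. The addr0/addr1 offsets are assumed here to be base + 0 and base + 4, and reg_wr()/reg_rd() are hypothetical helpers standing in for the driver's REG_WR()/REG_RD() accessors.

#define DRV_CTRL_REG	0xa3c8			/* MISC_REG_DRIVER_CONTROL_7 */
#define CLIENT5_BIT	(1u << 5)

static int claim_client5(struct bnx2x *bp)
{
	u32 status;

	/* "addr1": request control of the clients selected by the mask */
	reg_wr(bp, DRV_CTRL_REG + 4, CLIENT5_BIT);

	/* reading either address returns the current ownership status */
	status = reg_rd(bp, DRV_CTRL_REG);
	if (!(status & CLIENT5_BIT))
		return -EBUSY;			/* another driver owns client 5 */
	return 0;
}

/* releasing works the other way: write the same mask to "addr0" (base + 0) */
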
@@ -1394,13 +1412,13 @@
#define MISC_REG_GPIO 0xa490
/* [R 28] this field hold the last information that caused reserved
attention. bits [19:0] - address; [22:20] function; [23] reserved;
- [27:24] the master thatcaused the attention - according to the following
+ [27:24] the master that caused the attention - according to the following
encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
dbu; 8 = dmae */
#define MISC_REG_GRC_RSV_ATTN 0xa3c0
/* [R 28] this field hold the last information that caused timeout
attention. bits [19:0] - address; [22:20] function; [23] reserved;
- [27:24] the master thatcaused the attention - according to the following
+ [27:24] the master that caused the attention - according to the following
encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
dbu; 8 = dmae */
#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
@@ -1677,6 +1695,7 @@
/* [RW 8] init credit counter for port0 in LLH */
#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
#define NIG_REG_LLH0_XCM_MASK 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
/* [RW 1] send to BRB1 if no match on any of RMP rules. */
#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
/* [RW 2] Determine the classification participants. 0: no classification.1:
@@ -1727,6 +1746,9 @@
/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
for port0 */
#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
between 1024 and 1522 bytes for port0 */
#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
@@ -2298,7 +2320,7 @@
/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
-128k */
#define PXP2_REG_RQ_QM_P_SIZE 0x120050
-/* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */
+/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */
#define PXP2_REG_RQ_RBC_DONE 0x1201b0
/* [RW 3] Max burst size filed for read requests port 0; 000 - 128B;
001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
@@ -2406,7 +2428,7 @@
/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
buffer reaches this number has_payload will be asserted */
#define PXP2_REG_WR_DMAE_MPS 0x1205ec
-/* [RW 10] if Number of entries in dmae fifo will be higer than this
+/* [RW 10] if Number of entries in dmae fifo will be higher than this
threshold then has_payload indication will be asserted; the default value
   should be equal to > write MBS size! */
#define PXP2_REG_WR_DMAE_TH 0x120368
@@ -2427,7 +2449,7 @@
/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
buffer reaches this number has_payload will be asserted */
#define PXP2_REG_WR_TSDM_MPS 0x1205d4
-/* [RW 10] if Number of entries in usdmdp fifo will be higer than this
+/* [RW 10] if Number of entries in usdmdp fifo will be higher than this
threshold then has_payload indication will be asserted; the default value
   should be equal to > write MBS size! */
#define PXP2_REG_WR_USDMDP_TH 0x120348
@@ -3294,12 +3316,12 @@
#define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
#define CFC_DEBUG1_REG_WRITE_AC (0x1<<4)
#define CFC_DEBUG1_REG_WRITE_AC_SIZE 4
-/* [R 1] debug only: This bit indicates wheter indicates that external
+/* [R 1] debug only: This bit indicates whether indicates that external
buffer was wrapped (oldest data was thrown); Relevant only when
~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */
#define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124
#define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1
-/* [R 1] debug only: This bit indicates wheter the internal buffer was
+/* [R 1] debug only: This bit indicates whether the internal buffer was
wrapped (oldest data was thrown) Relevant only when
~dbg_registers_debug_target=0 (internal buffer) */
#define DBG_REG_WRAP_ON_INT_BUFFER 0xc128
@@ -4944,6 +4966,7 @@
#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN (1L<<4)
#define MISC_REGISTERS_GPIO_0 0
#define MISC_REGISTERS_GPIO_1 1
#define MISC_REGISTERS_GPIO_2 2
@@ -4959,6 +4982,7 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
@@ -4993,7 +5017,9 @@
#define HW_LOCK_MAX_RESOURCE_VALUE 31
#define HW_LOCK_RESOURCE_8072_MDIO 0
#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
#define HW_LOCK_RESOURCE_SPIO 2
+#define HW_LOCK_RESOURCE_UNDI 5
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -5144,59 +5170,73 @@
#define GRCBASE_MISC_AEU GRCBASE_MISC
-/*the offset of the configuration space in the pci core register*/
+/* offset of configuration space in the pci core register */
#define PCICFG_OFFSET 0x2000
#define PCICFG_VENDOR_ID_OFFSET 0x00
#define PCICFG_DEVICE_ID_OFFSET 0x02
#define PCICFG_COMMAND_OFFSET 0x04
+#define PCICFG_COMMAND_IO_SPACE (1<<0)
+#define PCICFG_COMMAND_MEM_SPACE (1<<1)
+#define PCICFG_COMMAND_BUS_MASTER (1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
+#define PCICFG_COMMAND_PERR_ENA (1<<6)
+#define PCICFG_COMMAND_STEPPING (1<<7)
+#define PCICFG_COMMAND_SERR_ENA (1<<8)
+#define PCICFG_COMMAND_FAST_B2B (1<<9)
+#define PCICFG_COMMAND_INT_DISABLE (1<<10)
+#define PCICFG_COMMAND_RESERVED (0x1f<<11)
#define PCICFG_STATUS_OFFSET 0x06
-#define PCICFG_REVESION_ID 0x08
+#define PCICFG_REVESION_ID 0x08
#define PCICFG_CACHE_LINE_SIZE 0x0c
#define PCICFG_LATENCY_TIMER 0x0d
-#define PCICFG_BAR_1_LOW 0x10
-#define PCICFG_BAR_1_HIGH 0x14
-#define PCICFG_BAR_2_LOW 0x18
-#define PCICFG_BAR_2_HIGH 0x1c
-#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+#define PCICFG_BAR_1_LOW 0x10
+#define PCICFG_BAR_1_HIGH 0x14
+#define PCICFG_BAR_2_LOW 0x18
+#define PCICFG_BAR_2_HIGH 0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
-#define PCICFG_INT_LINE 0x3c
-#define PCICFG_INT_PIN 0x3d
-#define PCICFG_PM_CSR_OFFSET 0x4c
-#define PCICFG_GRC_ADDRESS 0x78
-#define PCICFG_GRC_DATA 0x80
+#define PCICFG_INT_LINE 0x3c
+#define PCICFG_INT_PIN 0x3d
+#define PCICFG_PM_CAPABILITY 0x48
+#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
+#define PCICFG_PM_CAPABILITY_DSI (1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
+#define PCICFG_PM_CSR_OFFSET 0x4c
+#define PCICFG_PM_CSR_STATE (0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
+#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+#define PCICFG_GRC_ADDRESS 0x78
+#define PCICFG_GRC_DATA 0x80
#define PCICFG_DEVICE_CONTROL 0xb4
#define PCICFG_LINK_CONTROL 0xbc
-#define PCICFG_COMMAND_IO_SPACE (1<<0)
-#define PCICFG_COMMAND_MEM_SPACE (1<<1)
-#define PCICFG_COMMAND_BUS_MASTER (1<<2)
-#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
-#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
-#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
-#define PCICFG_COMMAND_PERR_ENA (1<<6)
-#define PCICFG_COMMAND_STEPPING (1<<7)
-#define PCICFG_COMMAND_SERR_ENA (1<<8)
-#define PCICFG_COMMAND_FAST_B2B (1<<9)
-#define PCICFG_COMMAND_INT_DISABLE (1<<10)
-#define PCICFG_COMMAND_RESERVED (0x1f<<11)
-
-#define PCICFG_PM_CSR_STATE (0x3<<0)
-#define PCICFG_PM_CSR_PME_STATUS (1<<15)
#define BAR_USTRORM_INTMEM 0x400000
#define BAR_CSTRORM_INTMEM 0x410000
#define BAR_XSTRORM_INTMEM 0x420000
#define BAR_TSTRORM_INTMEM 0x430000
+/* for accessing the IGU in case of status block ACK */
#define BAR_IGU_INTMEM 0x440000
#define BAR_DOORBELL_OFFSET 0x800000
#define BAR_ME_REGISTER 0x450000
-
-#define GRC_CONFIG_2_SIZE_REG 0x408 /* config_2 offset */
-#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG 0x408
+#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
@@ -5213,11 +5253,11 @@
#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
-#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
-#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
-#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
-#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
-#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
@@ -5234,46 +5274,44 @@
#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
-#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
-#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
+#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
/* config_3 offset */
-#define GRC_CONFIG_3_SIZE_REG (0x40c)
-#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
-#define PCI_CONFIG_3_FORCE_PME (1L<<24)
-#define PCI_CONFIG_3_PME_STATUS (1L<<25)
-#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
-#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
-#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
-#define PCI_CONFIG_3_PCI_POWER (1L<<31)
-
-/* config_2 offset */
-#define GRC_CONFIG_2_SIZE_REG 0x408
+#define GRC_CONFIG_3_SIZE_REG 0x40c
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define PCI_CONFIG_3_PCI_POWER (1L<<31)
#define GRC_BAR2_CONFIG 0x4e0
-#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
-#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+
+#define PCI_PM_DATA_A 0x410
+#define PCI_PM_DATA_B 0x414
+#define PCI_ID_VAL1 0x434
+#define PCI_ID_VAL2 0x438
-#define PCI_PM_DATA_A (0x410)
-#define PCI_PM_DATA_B (0x414)
-#define PCI_ID_VAL1 (0x434)
-#define PCI_ID_VAL2 (0x438)
#define MDIO_REG_BANK_CL73_IEEEB0 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
@@ -5522,6 +5560,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_PMA_REG_GEN_CTRL 0xca10
#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
#define MDIO_PMA_REG_ROM_VER1 0xca19
#define MDIO_PMA_REG_ROM_VER2 0xca1a
#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
@@ -5576,7 +5616,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
#define MDIO_AN_REG_CL37_AN 0xffe0
-#define MDIO_AN_REG_CL37_FD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
#define IGU_FUNC_BASE 0x0400
@@ -5600,4 +5641,13 @@ Theotherbitsarereservedandshouldbezero*/
#define IGU_INT_NOP 2
#define IGU_INT_NOP2 3
+#define COMMAND_REG_INT_ACK 0x0
+#define COMMAND_REG_PROD_UPD 0x4
+#define COMMAND_REG_ATTN_BITS_UPD 0x8
+#define COMMAND_REG_ATTN_BITS_SET 0xc
+#define COMMAND_REG_ATTN_BITS_CLR 0x10
+#define COMMAND_REG_COALESCE_NOW 0x14
+#define COMMAND_REG_SIMD_MASK 0x18
+#define COMMAND_REG_SIMD_NOMASK 0x1c
+
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index a8771ffc61e8..e07b5c51ec5b 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -23,12 +23,57 @@
static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY);
+ int ret;
+
+ ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER1, "PCMCIA PWR1");
+ if (ret)
+ goto err1;
+ ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER1, 0);
+ if (ret)
+ goto err2;
+
+ ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER2, "PCMCIA PWR2");
+ if (ret)
+ goto err2;
+ ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER2, 0);
+ if (ret)
+ goto err3;
+
+ ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_RESET, "PCMCIA RST");
+ if (ret)
+ goto err3;
+ ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_RESET, 1);
+ if (ret)
+ goto err4;
+
+ ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_READY, "PCMCIA RDY");
+ if (ret)
+ goto err4;
+ ret = gpio_direction_input(GPIO_NR_PALMTX_PCMCIA_READY);
+ if (ret)
+ goto err5;
+
+ skt->irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY);
return 0;
+
+err5:
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_READY);
+err4:
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET);
+err3:
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2);
+err2:
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1);
+err1:
+ return ret;
}
static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_READY);
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET);
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2);
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1);
}
static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
@@ -109,7 +154,7 @@ static void __exit palmtx_pcmcia_exit(void)
platform_device_unregister(palmtx_pcmcia_device);
}
-fs_initcall(palmtx_pcmcia_init);
+module_init(palmtx_pcmcia_init);
module_exit(palmtx_pcmcia_exit);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 856cc1af40df..35dcc06eb3e2 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/rtc.h>
-#include <linux/smp_lock.h>
#include "rtc-core.h"
static dev_t rtc_devt;
@@ -27,11 +26,8 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
struct rtc_device, char_dev);
const struct rtc_class_ops *ops = rtc->ops;
- lock_kernel();
- if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) {
- err = -EBUSY;
- goto out;
- }
+ if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
+ return -EBUSY;
file->private_data = rtc;
@@ -41,13 +37,11 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
rtc->irq_data = 0;
spin_unlock_irq(&rtc->irq_lock);
- goto out;
+ return 0;
}
/* something has gone wrong */
clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
-out:
- unlock_kernel();
return err;
}
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index fbb90b1e4098..a81adab6e515 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -482,7 +482,7 @@ isl1208_sysfs_register(struct device *dev)
static int
isl1208_sysfs_unregister(struct device *dev)
{
- device_remove_file(dev, &dev_attr_atrim);
+ device_remove_file(dev, &dev_attr_dtrim);
device_remove_file(dev, &dev_attr_atrim);
device_remove_file(dev, &dev_attr_usr);
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 73a86d09bba8..9c129248466c 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -7,13 +7,13 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/of_device.h>
#include <asm/system.h>
#include <asm/sbus.h>
#include <asm/dma.h>
#include <asm/oplib.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/bpp.h>
#include <asm/irq.h>
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fcdd73f25625..994da56fffed 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -680,7 +680,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
}
-const struct scsi_dh_devlist alua_dev_list[] = {
+static const struct scsi_dh_devlist alua_dev_list[] = {
{"HP", "MSA VOLUME" },
{"HP", "HSV101" },
{"HP", "HSV111" },
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index aa46b131b20e..b9d23e9e9a44 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -562,7 +562,7 @@ done:
return result;
}
-const struct scsi_dh_devlist clariion_dev_list[] = {
+static const struct scsi_dh_devlist clariion_dev_list[] = {
{"DGC", "RAID"},
{"DGC", "DISK"},
{"DGC", "VRAID"},
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9c7a1f8ebb72..a6a4ef3ad51c 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -282,7 +282,7 @@ static int hp_sw_activate(struct scsi_device *sdev)
return ret;
}
-const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
+static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
{"COMPAQ", "MSA1000 VOLUME"},
{"COMPAQ", "HSV110"},
{"HP", "HSV100"},
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b093a501f8ae..e7c7b4ebc1fe 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -574,7 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
return SCSI_RETURN_NOT_HANDLED;
}
-const struct scsi_dh_devlist rdac_dev_list[] = {
+static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1722"},
{"IBM", "1724"},
{"IBM", "1726"},
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 3b4a14e355c1..77cb34270fc1 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -449,6 +449,7 @@ config SERIAL_CLPS711X_CONSOLE
config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
depends on ARM && PLAT_S3C24XX
+ select SERIAL_CORE
help
Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
providing /dev/ttySAC0, 1 and 2 (note, some machines may not
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index aeeec5588afd..e41766d08035 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -17,11 +17,11 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/irq.h>
#if defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 15ee497e1c78..29b4458abf74 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -32,11 +32,11 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index e24e68235088..a378464f9292 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -35,11 +35,11 @@
#include <linux/serial_reg.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 0f3d69b86d67..3cb4c8aee13f 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -32,11 +32,11 @@
#include <linux/serio.h>
#endif
#include <linux/init.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 964124b60db2..75e86865234c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -226,10 +226,11 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
* Companion function to spi_alloc_device. Devices allocated with
* spi_alloc_device can be added onto the spi bus with this function.
*
- * Returns 0 on success; non-zero on failure
+ * Returns 0 on success; negative errno on failure
*/
int spi_add_device(struct spi_device *spi)
{
+ static DEFINE_MUTEX(spi_add_lock);
struct device *dev = spi->master->dev.parent;
int status;
@@ -246,26 +247,43 @@ int spi_add_device(struct spi_device *spi)
"%s.%u", spi->master->dev.bus_id,
spi->chip_select);
- /* drivers may modify this initial i/o setup */
+
+ /* We need to make sure there's no other device with this
+ * chipselect **BEFORE** we call setup(), else we'll trash
+ * its configuration. Lock against concurrent add() calls.
+ */
+ mutex_lock(&spi_add_lock);
+
+ if (bus_find_device_by_name(&spi_bus_type, NULL, spi->dev.bus_id)
+ != NULL) {
+ dev_err(dev, "chipselect %d already in use\n",
+ spi->chip_select);
+ status = -EBUSY;
+ goto done;
+ }
+
+ /* Drivers may modify this initial i/o setup, but will
+ * normally rely on the device being set up. Devices
+ * using SPI_CS_HIGH can't coexist well otherwise...
+ */
status = spi->master->setup(spi);
if (status < 0) {
dev_err(dev, "can't %s %s, status %d\n",
"setup", spi->dev.bus_id, status);
- return status;
+ goto done;
}
- /* driver core catches callers that misbehave by defining
- * devices that already exist.
- */
+ /* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
- if (status < 0) {
+ if (status < 0)
dev_err(dev, "can't %s %s, status %d\n",
"add", spi->dev.bus_id, status);
- return status;
- }
+ else
+ dev_dbg(dev, "registered child %s\n", spi->dev.bus_id);
- dev_dbg(dev, "registered child %s\n", spi->dev.bus_id);
- return 0;
+done:
+ mutex_unlock(&spi_add_lock);
+ return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
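
A sketch of the intended calling pattern for the spi_alloc_device()/spi_add_device() pair after this change; the chip-select and clock values are made-up examples. With the new check, spi_add_device() fails with -EBUSY if the chip select is already claimed, so the caller simply drops its reference:

static int example_add_spi_child(struct spi_master *master)
{
	struct spi_device *spi;
	int status;

	spi = spi_alloc_device(master);
	if (!spi)
		return -ENOMEM;

	/* describe the chip; a real caller would also set spi->modalias */
	spi->chip_select = 2;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_0;

	status = spi_add_device(spi);	/* -EBUSY if chipselect 2 is taken */
	if (status < 0)
		spi_dev_put(spi);	/* drop the spi_alloc_device() reference */
	return status;
}
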
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 755823cdf62a..bcefbddeba50 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,16 +95,18 @@ config USB
source "drivers/usb/core/Kconfig"
+source "drivers/usb/mon/Kconfig"
+
source "drivers/usb/host/Kconfig"
+source "drivers/usb/musb/Kconfig"
+
source "drivers/usb/class/Kconfig"
source "drivers/usb/storage/Kconfig"
source "drivers/usb/image/Kconfig"
-source "drivers/usb/mon/Kconfig"
-
comment "USB port drivers"
depends on USB
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 507a9bd0d77c..9aea43a8c4ad 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -602,7 +602,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
offd = le32_to_cpu(buf[offb++]);
if (offd >= size) {
if (printk_ratelimit())
- usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n",
+ usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n",
offd, cm);
ret = -EIO;
goto cleanup;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0725b1871f23..efc4373ededb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -51,6 +51,7 @@
*/
#undef DEBUG
+#undef VERBOSE_DEBUG
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -70,6 +71,9 @@
#include "cdc-acm.h"
+
+#define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */
+
/*
* Version Information
*/
@@ -85,6 +89,12 @@ static DEFINE_MUTEX(open_mutex);
#define ACM_READY(acm) (acm && acm->dev && acm->used)
+#ifdef VERBOSE_DEBUG
+#define verbose 1
+#else
+#define verbose 0
+#endif
+
/*
* Functions for ACM control messages.
*/
@@ -136,19 +146,17 @@ static int acm_wb_alloc(struct acm *acm)
static int acm_wb_is_avail(struct acm *acm)
{
int i, n;
+ unsigned long flags;
n = ACM_NW;
+ spin_lock_irqsave(&acm->write_lock, flags);
for (i = 0; i < ACM_NW; i++) {
n -= acm->wb[i].use;
}
+ spin_unlock_irqrestore(&acm->write_lock, flags);
return n;
}
-static inline int acm_wb_is_used(struct acm *acm, int wbn)
-{
- return acm->wb[wbn].use;
-}
-
/*
* Finish write.
*/
@@ -157,7 +165,6 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
unsigned long flags;
spin_lock_irqsave(&acm->write_lock, flags);
- acm->write_ready = 1;
wb->use = 0;
acm->transmitting--;
spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -190,40 +197,25 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
static int acm_write_start(struct acm *acm, int wbn)
{
unsigned long flags;
- struct acm_wb *wb;
+ struct acm_wb *wb = &acm->wb[wbn];
int rc;
spin_lock_irqsave(&acm->write_lock, flags);
if (!acm->dev) {
+ wb->use = 0;
spin_unlock_irqrestore(&acm->write_lock, flags);
return -ENODEV;
}
- if (!acm->write_ready) {
- spin_unlock_irqrestore(&acm->write_lock, flags);
- return 0; /* A white lie */
- }
-
- wb = &acm->wb[wbn];
- if(acm_wb_is_avail(acm) <= 1)
- acm->write_ready = 0;
-
dbg("%s susp_count: %d", __func__, acm->susp_count);
if (acm->susp_count) {
- acm->old_ready = acm->write_ready;
acm->delayed_wb = wb;
- acm->write_ready = 0;
schedule_work(&acm->waker);
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0; /* A white lie */
}
usb_mark_last_busy(acm->dev);
- if (!acm_wb_is_used(acm, wbn)) {
- spin_unlock_irqrestore(&acm->write_lock, flags);
- return 0;
- }
-
rc = acm_start_wb(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -488,22 +480,28 @@ urbs:
/* data interface wrote those outgoing bytes */
static void acm_write_bulk(struct urb *urb)
{
- struct acm *acm;
struct acm_wb *wb = urb->context;
+ struct acm *acm = wb->instance;
- dbg("Entering acm_write_bulk with status %d", urb->status);
+ if (verbose || urb->status
+ || (urb->actual_length != urb->transfer_buffer_length))
+ dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n",
+ urb->actual_length,
+ urb->transfer_buffer_length,
+ urb->status);
- acm = wb->instance;
acm_write_done(acm, wb);
if (ACM_READY(acm))
schedule_work(&acm->work);
+ else
+ wake_up_interruptible(&acm->drain_wait);
}
static void acm_softint(struct work_struct *work)
{
struct acm *acm = container_of(work, struct acm, work);
- dbg("Entering acm_softint.");
-
+
+ dev_vdbg(&acm->data->dev, "tx work\n");
if (!ACM_READY(acm))
return;
tty_wakeup(acm->tty);
@@ -512,7 +510,6 @@ static void acm_softint(struct work_struct *work)
static void acm_waker(struct work_struct *waker)
{
struct acm *acm = container_of(waker, struct acm, waker);
- long flags;
int rv;
rv = usb_autopm_get_interface(acm->control);
@@ -524,9 +521,6 @@ static void acm_waker(struct work_struct *waker)
acm_start_wb(acm, acm->delayed_wb);
acm->delayed_wb = NULL;
}
- spin_lock_irqsave(&acm->write_lock, flags);
- acm->write_ready = acm->old_ready;
- spin_unlock_irqrestore(&acm->write_lock, flags);
usb_autopm_put_interface(acm->control);
}
@@ -628,6 +622,8 @@ static void acm_tty_unregister(struct acm *acm)
kfree(acm);
}
+static int acm_tty_chars_in_buffer(struct tty_struct *tty);
+
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
@@ -642,6 +638,13 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
if (acm->dev) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
+
+ /* try letting the last writes drain naturally */
+ wait_event_interruptible_timeout(acm->drain_wait,
+ (ACM_NW == acm_wb_is_avail(acm))
+ || !acm->dev,
+ ACM_CLOSE_TIMEOUT * HZ);
+
usb_kill_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
@@ -697,7 +700,7 @@ static int acm_tty_write_room(struct tty_struct *tty)
* Do not let the line discipline to know that we have a reserve,
* or it might get too enthusiastic.
*/
- return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0;
+ return acm_wb_is_avail(acm) ? acm->writesize : 0;
}
static int acm_tty_chars_in_buffer(struct tty_struct *tty)
@@ -1072,11 +1075,11 @@ skip_normal_probe:
acm->urb_task.data = (unsigned long) acm;
INIT_WORK(&acm->work, acm_softint);
INIT_WORK(&acm->waker, acm_waker);
+ init_waitqueue_head(&acm->drain_wait);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->write_ready = 1;
acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
@@ -1108,9 +1111,11 @@ skip_normal_probe:
rcv->instance = acm;
}
for (i = 0; i < num_rx_buf; i++) {
- struct acm_rb *buf = &(acm->rb[i]);
+ struct acm_rb *rb = &(acm->rb[i]);
- if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) {
+ rb->base = usb_buffer_alloc(acm->dev, readsize,
+ GFP_KERNEL, &rb->dma);
+ if (!rb->base) {
dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n");
goto alloc_fail7;
}
@@ -1172,6 +1177,7 @@ skip_countries:
acm_set_line(acm, &acm->line);
usb_driver_claim_interface(&acm_driver, data_interface, acm);
+ usb_set_intfdata(data_interface, acm);
usb_get_intf(control_interface);
tty_register_device(acm_tty_driver, minor, &control_interface->dev);
@@ -1221,11 +1227,11 @@ static void acm_disconnect(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct usb_device *usb_dev = interface_to_usbdev(intf);
- mutex_lock(&open_mutex);
- if (!acm || !acm->dev) {
- mutex_unlock(&open_mutex);
+ /* sibling interface is already cleaning up */
+ if (!acm)
return;
- }
+
+ mutex_lock(&open_mutex);
if (acm->country_codes){
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 85c3aaaab7c5..1f95e7aa1b66 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -106,8 +106,6 @@ struct acm {
struct list_head spare_read_bufs;
struct list_head filled_read_bufs;
int write_used; /* number of non-empty write buffers */
- int write_ready; /* write urb is not running */
- int old_ready;
int processing;
int transmitting;
spinlock_t write_lock;
@@ -115,6 +113,7 @@ struct acm {
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
struct work_struct waker;
+ wait_queue_head_t drain_wait; /* close processing */
struct tasklet_struct urb_task; /* rx processing */
spinlock_t throttle_lock; /* synchronize throtteling and read callback */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ddb54e14a5c5..2be37fe466f2 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -774,7 +774,6 @@ void usb_deregister(struct usb_driver *driver)
}
EXPORT_SYMBOL_GPL(usb_deregister);
-
/* Forced unbinding of a USB interface driver, either because
* it doesn't support pre_reset/post_reset/reset_resume or
* because it doesn't support suspend/resume.
@@ -821,6 +820,8 @@ void usb_rebind_intf(struct usb_interface *intf)
dev_warn(&intf->dev, "rebind failed: %d\n", rc);
}
+#ifdef CONFIG_PM
+
#define DO_UNBIND 0
#define DO_REBIND 1
@@ -872,8 +873,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
}
}
-#ifdef CONFIG_PM
-
/* Caller has locked udev's pm_mutex */
static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
{
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 586d6f1376cf..286b4431a097 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
- device_del(&interface->dev);
usb_remove_sysfs_intf_files(interface);
+ device_del(&interface->dev);
}
/* Now that the interfaces are unbound, nobody should
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c6a8c6b1116a..acc95b2ac6f8 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -284,6 +284,16 @@ config USB_LH7A40X
default USB_GADGET
select USB_GADGET_SELECTED
+# built in ../musb along with host support
+config USB_GADGET_MUSB_HDRC
+ boolean "Inventra HDRC USB Peripheral (TI, ...)"
+ depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+ select USB_GADGET_DUALSPEED
+ select USB_GADGET_SELECTED
+ help
+ This OTG-capable silicon IP is used in dual designs including
+ the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+
config USB_GADGET_OMAP
boolean "OMAP USB Device Controller"
depends on ARCH_OMAP
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 21d1406af9ee..7600a0c78753 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -542,13 +542,14 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
req->req.context = dum;
req->req.complete = fifo_complete;
+ list_add_tail(&req->queue, &ep->queue);
spin_unlock (&dum->lock);
_req->actual = _req->length;
_req->status = 0;
_req->complete (_ep, _req);
spin_lock (&dum->lock);
- }
- list_add_tail (&req->queue, &ep->queue);
+ } else
+ list_add_tail(&req->queue, &ep->queue);
spin_unlock_irqrestore (&dum->lock, flags);
/* real hardware would likely enable transfers here, in case
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d8faccf27895..5ee1590b8e9c 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -47,18 +47,37 @@ struct f_acm {
u8 ctrl_id, data_id;
u8 port_num;
- struct usb_descriptor_header **fs_function;
+ u8 pending;
+
+ /* lock is mostly for pending and notify_req ... they get accessed
+ * by callbacks both from tty (open/close/break) under its spinlock,
+ * and notify_req.complete() which can't use that lock.
+ */
+ spinlock_t lock;
+
struct acm_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct acm_ep_descs hs;
struct usb_ep *notify;
struct usb_endpoint_descriptor *notify_desc;
+ struct usb_request *notify_req;
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
+
+ /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
u16 port_handshake_bits;
-#define RS232_RTS (1 << 1) /* unused with full duplex */
-#define RS232_DTR (1 << 0) /* host is ready for data r/w */
+#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+
+ /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
+ u16 serial_state;
+#define ACM_CTRL_OVERRUN (1 << 6)
+#define ACM_CTRL_PARITY (1 << 5)
+#define ACM_CTRL_FRAMING (1 << 4)
+#define ACM_CTRL_RI (1 << 3)
+#define ACM_CTRL_BRK (1 << 2)
+#define ACM_CTRL_DSR (1 << 1)
+#define ACM_CTRL_DCD (1 << 0)
};
static inline struct f_acm *func_to_acm(struct usb_function *f)
@@ -66,12 +85,17 @@ static inline struct f_acm *func_to_acm(struct usb_function *f)
return container_of(f, struct f_acm, port.func);
}
+static inline struct f_acm *port_to_acm(struct gserial *p)
+{
+ return container_of(p, struct f_acm, port);
+}
+
/*-------------------------------------------------------------------------*/
/* notification endpoint uses smallish and infrequent fixed-size messages */
#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
-#define GS_NOTIFY_MAXPACKET 8
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
/* interface and class descriptors: */
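
For reference, a small stand-alone sketch of the arithmetic behind GS_NOTIFY_MAXPACKET just above: the CDC notification header is 8 bytes and the SerialState payload carrying the ACM_CTRL_* bits is 2 bytes. The struct names are illustrative, not the kernel's own.

#include <assert.h>
#include <stdint.h>

struct cdc_notification_hdr {		/* same layout as usb_cdc_notification */
	uint8_t  bmRequestType;		/* 0xa1 = IN | CLASS | INTERFACE */
	uint8_t  bNotificationType;	/* 0x20 = SERIAL_STATE */
	uint16_t wValue;		/* 0 */
	uint16_t wIndex;		/* control interface number */
	uint16_t wLength;		/* 2 = size of the payload below */
} __attribute__((packed));

struct serial_state_notification {
	struct cdc_notification_hdr hdr;
	uint16_t serial_state;		/* ACM_CTRL_DCD | ACM_CTRL_DSR | ... */
} __attribute__((packed));

int main(void)
{
	/* 8-byte header + 2-byte payload: hence GS_NOTIFY_MAXPACKET == 10 */
	assert(sizeof(struct serial_state_notification) == 10);
	return 0;
}
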
@@ -117,7 +141,7 @@ static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
.bLength = sizeof(acm_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
- .bmCapabilities = (1 << 1),
+ .bmCapabilities = USB_CDC_CAP_LINE,
};
static struct usb_cdc_union_desc acm_union_desc __initdata = {
@@ -277,6 +301,11 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
/* composite driver infrastructure handles everything except
* CDC class messages; interface activation uses set_alt().
+ *
+ * Note CDC spec table 4 lists the ACM request profile. It requires
+ * encapsulated command support ... we don't handle any, and respond
+ * to them by stalling. Options include get/set/clear comm features
+ * (not that useful) and SEND_BREAK.
*/
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
@@ -312,7 +341,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
value = 0;
/* FIXME we should not allow data to flow until the
- * host sets the RS232_DTR bit; and when it clears
+ * host sets the ACM_CTRL_DTR bit; and when it clears
* that bit, we should return to that no-flow state.
*/
acm->port_handshake_bits = w_value;
@@ -350,9 +379,6 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* we know alt == 0, so this is an activation or a reset */
if (intf == acm->ctrl_id) {
- /* REVISIT this may need more work when we start to
- * send notifications ...
- */
if (acm->notify->driver_data) {
VDBG(cdev, "reset acm control interface %d\n", intf);
usb_ep_disable(acm->notify);
@@ -397,6 +423,128 @@ static void acm_disable(struct usb_function *f)
/*-------------------------------------------------------------------------*/
+/**
+ * acm_cdc_notify - issue CDC notification to host
+ * @acm: wraps host to be notified
+ * @type: notification type
+ * @value: Refer to cdc specs, wValue field.
+ * @data: data to be sent
+ * @length: size of data
+ * Context: irqs blocked, acm->lock held, acm_notify_req non-null
+ *
+ * Returns zero on success or a negative errno.
+ *
+ * See section 6.3.5 of the CDC 1.1 specification for information
+ * about the only notification we issue: SerialState change.
+ */
+static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
+ void *data, unsigned length)
+{
+ struct usb_ep *ep = acm->notify;
+ struct usb_request *req;
+ struct usb_cdc_notification *notify;
+ const unsigned len = sizeof(*notify) + length;
+ void *buf;
+ int status;
+
+ req = acm->notify_req;
+ acm->notify_req = NULL;
+ acm->pending = false;
+
+ req->length = len;
+ notify = req->buf;
+ buf = notify + 1;
+
+ notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ notify->bNotificationType = type;
+ notify->wValue = cpu_to_le16(value);
+ notify->wIndex = cpu_to_le16(acm->ctrl_id);
+ notify->wLength = cpu_to_le16(length);
+ memcpy(buf, data, length);
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status < 0) {
+ ERROR(acm->port.func.config->cdev,
+ "acm ttyGS%d can't notify serial state, %d\n",
+ acm->port_num, status);
+ acm->notify_req = req;
+ }
+
+ return status;
+}
+
+static int acm_notify_serial_state(struct f_acm *acm)
+{
+ struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+ int status;
+
+ spin_lock(&acm->lock);
+ if (acm->notify_req) {
+ DBG(cdev, "acm ttyGS%d serial state %04x\n",
+ acm->port_num, acm->serial_state);
+ status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
+ 0, &acm->serial_state, sizeof(acm->serial_state));
+ } else {
+ acm->pending = true;
+ status = 0;
+ }
+ spin_unlock(&acm->lock);
+ return status;
+}
+
+static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_acm *acm = req->context;
+ u8 doit = false;
+
+ /* on this call path we do NOT hold the port spinlock,
+ * which is why ACM needs its own spinlock
+ */
+ spin_lock(&acm->lock);
+ if (req->status != -ESHUTDOWN)
+ doit = acm->pending;
+ acm->notify_req = req;
+ spin_unlock(&acm->lock);
+
+ if (doit)
+ acm_notify_serial_state(acm);
+}
+
+/* connect == the TTY link is open */
+
+static void acm_connect(struct gserial *port)
+{
+ struct f_acm *acm = port_to_acm(port);
+
+ acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ acm_notify_serial_state(acm);
+}
+
+static void acm_disconnect(struct gserial *port)
+{
+ struct f_acm *acm = port_to_acm(port);
+
+ acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ acm_notify_serial_state(acm);
+}
+
+static int acm_send_break(struct gserial *port, int duration)
+{
+ struct f_acm *acm = port_to_acm(port);
+ u16 state;
+
+ state = acm->serial_state;
+ state &= ~ACM_CTRL_BRK;
+ if (duration)
+ state |= ACM_CTRL_BRK;
+
+ acm->serial_state = state;
+ return acm_notify_serial_state(acm);
+}
+
+/*-------------------------------------------------------------------------*/
+
/* ACM function driver setup/binding */
static int __init
acm_bind(struct usb_configuration *c, struct usb_function *f)
@@ -445,8 +593,20 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm->notify = ep;
ep->driver_data = cdev; /* claim */
+ /* allocate notification */
+ acm->notify_req = gs_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (!acm->notify_req)
+ goto fail;
+
+ acm->notify_req->complete = acm_cdc_notify_complete;
+ acm->notify_req->context = acm;
+
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(acm_fs_function);
+ if (!f->descriptors)
+ goto fail;
acm->fs.in = usb_find_endpoint(acm_fs_function,
f->descriptors, &acm_fs_in_desc);
@@ -478,8 +638,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
f->hs_descriptors, &acm_hs_notify_desc);
}
- /* FIXME provide a callback for triggering notifications */
-
DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
acm->port_num,
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -488,6 +646,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail:
+ if (acm->notify_req)
+ gs_free_req(acm->notify, acm->notify_req);
+
/* we might as well release our claims on endpoints */
if (acm->notify)
acm->notify->driver_data = NULL;
@@ -504,10 +665,13 @@ fail:
static void
acm_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ struct f_acm *acm = func_to_acm(f);
+
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
- kfree(func_to_acm(f));
+ gs_free_req(acm->notify, acm->notify_req);
+ kfree(acm);
}
/* Some controllers can't support CDC ACM ... */
@@ -571,8 +735,14 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
if (!acm)
return -ENOMEM;
+ spin_lock_init(&acm->lock);
+
acm->port_num = port_num;
+ acm->port.connect = acm_connect;
+ acm->port.disconnect = acm_disconnect;
+ acm->port.send_break = acm_send_break;
+
acm->port.func.name = "acm";
acm->port.func.strings = acm_strings;
/* descriptors are per-instance copies */
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 0822e9d7693a..a2b5c092bda0 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -63,9 +63,7 @@ struct f_ecm {
char ethaddr[14];
- struct usb_descriptor_header **fs_function;
struct ecm_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct ecm_ep_descs hs;
struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 61652f0f13fd..659b3d9671c4 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -85,9 +85,7 @@ struct f_rndis {
u8 ethaddr[ETH_ALEN];
int config;
- struct usb_descriptor_header **fs_function;
struct rndis_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct rndis_ep_descs hs;
struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 1b6bde9aaed5..fe5674db344b 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -36,9 +36,7 @@ struct f_gser {
u8 data_id;
u8 port_num;
- struct usb_descriptor_header **fs_function;
struct gser_descs fs;
- struct usb_descriptor_header **hs_function;
struct gser_descs hs;
};
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index afeab9a0523f..acb8d233aa1d 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -66,9 +66,7 @@ struct f_gether {
char ethaddr[14];
- struct usb_descriptor_header **fs_function;
struct geth_descs fs;
- struct usb_descriptor_header **hs_function;
struct geth_descs hs;
};
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 5246e8fef2b2..17d9905101b7 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -11,6 +11,10 @@
* Some are available on 2.4 kernels; several are available, but not
* yet pushed in the 2.6 mainline tree.
*/
+
+#ifndef __GADGET_CHIPS_H
+#define __GADGET_CHIPS_H
+
#ifdef CONFIG_USB_GADGET_NET2280
#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
#else
@@ -237,3 +241,5 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
/* Everything else is *presumably* fine ... */
return true;
}
+
+#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 376e80c07530..574c53831a05 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -54,6 +54,7 @@
#include <mach/dma.h>
#include <mach/usb.h>
+#include <mach/control.h>
#include "omap_udc.h"
@@ -2310,10 +2311,10 @@ static int proc_otg_show(struct seq_file *s)
u32 trans;
char *ctrl_name;
- tmp = OTG_REV_REG;
+ tmp = omap_readl(OTG_REV);
if (cpu_is_omap24xx()) {
ctrl_name = "control_devconf";
- trans = CONTROL_DEVCONF_REG;
+ trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
} else {
ctrl_name = "tranceiver_ctrl";
trans = omap_readw(USB_TRANSCEIVER_CTRL);
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index abf9505d3a75..53d59287f2bc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -52,13 +52,16 @@
* is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
*/
+#define PREFIX "ttyGS"
+
/*
* gserial is the lifecycle interface, used by USB functions
* gs_port is the I/O nexus, used by the tty driver
* tty_struct links to the tty/filesystem framework
*
* gserial <---> gs_port ... links will be null when the USB link is
- * inactive; managed by gserial_{connect,disconnect}().
+ * inactive; managed by gserial_{connect,disconnect}(). each gserial
+ * instance can wrap its own USB control protocol.
* gserial->ioport == usb_ep->driver_data ... gs_port
* gs_port->port_usb ... gserial
*
@@ -100,6 +103,8 @@ struct gs_port {
wait_queue_head_t close_wait; /* wait for last close */
struct list_head read_pool;
+ struct list_head read_queue;
+ unsigned n_read;
struct tasklet_struct push;
struct list_head write_pool;
@@ -177,7 +182,7 @@ static void gs_buf_clear(struct gs_buf *gb)
/*
* gs_buf_data_avail
*
- * Return the number of bytes of data available in the circular
+ * Return the number of bytes of data written into the circular
* buffer.
*/
static unsigned gs_buf_data_avail(struct gs_buf *gb)
@@ -278,7 +283,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
* Allocate a usb_request and its buffer. Returns a pointer to the
* usb_request or NULL if there is an error.
*/
-static struct usb_request *
+struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
struct usb_request *req;
@@ -302,7 +307,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
*
* Free a usb_request and its buffer.
*/
-static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
+void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
kfree(req->buf);
usb_ep_free_request(ep, req);
@@ -367,11 +372,9 @@ __acquires(&port->port_lock)
req->length = len;
list_del(&req->list);
-#ifdef VERBOSE_DEBUG
- pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n",
- __func__, in->name, len, *((u8 *)req->buf),
+ pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
+ port->port_num, len, *((u8 *)req->buf),
*((u8 *)req->buf+1), *((u8 *)req->buf+2));
-#endif
/* Drop lock while we call out of driver; completions
* could be issued while we do so. Disconnection may
@@ -401,56 +404,6 @@ __acquires(&port->port_lock)
return status;
}
-static void gs_rx_push(unsigned long _port)
-{
- struct gs_port *port = (void *)_port;
- struct tty_struct *tty = port->port_tty;
-
- /* With low_latency, tty_flip_buffer_push() doesn't put its
- * real work through a workqueue, so the ldisc has a better
- * chance to keep up with peak USB data rates.
- */
- if (tty) {
- tty_flip_buffer_push(tty);
- wake_up_interruptible(&tty->read_wait);
- }
-}
-
-/*
- * gs_recv_packet
- *
- * Called for each USB packet received. Reads the packet
- * header and stuffs the data in the appropriate tty buffer.
- * Returns 0 if successful, or a negative error number.
- *
- * Called during USB completion routine, on interrupt time.
- * With port_lock.
- */
-static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size)
-{
- unsigned len;
- struct tty_struct *tty;
-
- /* I/O completions can continue for a while after close(), until the
- * request queue empties. Just discard any data we receive, until
- * something reopens this TTY ... as if there were no HW flow control.
- */
- tty = port->port_tty;
- if (tty == NULL) {
- pr_vdebug("%s: ttyGS%d, after close\n",
- __func__, port->port_num);
- return -EIO;
- }
-
- len = tty_insert_flip_string(tty, packet, size);
- if (len > 0)
- tasklet_schedule(&port->push);
- if (len < size)
- pr_debug("%s: ttyGS%d, drop %d bytes\n",
- __func__, port->port_num, size - len);
- return 0;
-}
-
/*
* Context: caller owns port_lock, and port_usb is set
*/
@@ -469,9 +422,9 @@ __acquires(&port->port_lock)
int status;
struct tty_struct *tty;
- /* no more rx if closed or throttled */
+ /* no more rx if closed */
tty = port->port_tty;
- if (!tty || test_bit(TTY_THROTTLED, &tty->flags))
+ if (!tty)
break;
req = list_entry(pool->next, struct usb_request, list);
@@ -500,36 +453,134 @@ __acquires(&port->port_lock)
return started;
}
-static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+/*
+ * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * layer until it refuses to take any more data (or is throttled back).
+ * Then it issues reads for any further data.
+ *
+ * If the RX queue becomes full enough that no usb_request is queued,
+ * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
+ * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
+ * can be buffered before the TTY layer's buffers (currently 64 KB).
+ */
+static void gs_rx_push(unsigned long _port)
{
- int status;
- struct gs_port *port = ep->driver_data;
+ struct gs_port *port = (void *)_port;
+ struct tty_struct *tty;
+ struct list_head *queue = &port->read_queue;
+ bool disconnect = false;
+ bool do_push = false;
- spin_lock(&port->port_lock);
- list_add(&req->list, &port->read_pool);
+ /* hand any queued data to the tty */
+ spin_lock_irq(&port->port_lock);
+ tty = port->port_tty;
+ while (!list_empty(queue)) {
+ struct usb_request *req;
- switch (req->status) {
- case 0:
- /* normal completion */
- status = gs_recv_packet(port, req->buf, req->actual);
- if (status && status != -EIO)
- pr_debug("%s: %s %s err %d\n",
- __func__, "recv", ep->name, status);
- gs_start_rx(port);
- break;
+ req = list_first_entry(queue, struct usb_request, list);
- case -ESHUTDOWN:
- /* disconnect */
- pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
- break;
+ /* discard data if tty was closed */
+ if (!tty)
+ goto recycle;
- default:
- /* presumably a transient fault */
- pr_warning("%s: unexpected %s status %d\n",
- __func__, ep->name, req->status);
- gs_start_rx(port);
- break;
+ /* leave data queued if tty was rx throttled */
+ if (test_bit(TTY_THROTTLED, &tty->flags))
+ break;
+
+ switch (req->status) {
+ case -ESHUTDOWN:
+ disconnect = true;
+ pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
+ break;
+
+ default:
+ /* presumably a transient fault */
+ pr_warning(PREFIX "%d: unexpected RX status %d\n",
+ port->port_num, req->status);
+ /* FALLTHROUGH */
+ case 0:
+ /* normal completion */
+ break;
+ }
+
+ /* push data to (open) tty */
+ if (req->actual) {
+ char *packet = req->buf;
+ unsigned size = req->actual;
+ unsigned n;
+ int count;
+
+ /* we may have pushed part of this packet already... */
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ count = tty_insert_flip_string(tty, packet, size);
+ if (count)
+ do_push = true;
+ if (count != size) {
+ /* stop pushing; TTY layer can't handle more */
+ port->n_read += count;
+ pr_vdebug(PREFIX "%d: rx block %d/%d\n",
+ port->port_num,
+ count, req->actual);
+ break;
+ }
+ port->n_read = 0;
+ }
+recycle:
+ list_move(&req->list, &port->read_pool);
}
+
+ /* Push from tty to ldisc; this is immediate with low_latency, and
+ * may trigger callbacks to this driver ... so drop the spinlock.
+ */
+ if (tty && do_push) {
+ spin_unlock_irq(&port->port_lock);
+ tty_flip_buffer_push(tty);
+ wake_up_interruptible(&tty->read_wait);
+ spin_lock_irq(&port->port_lock);
+
+ /* tty may have been closed */
+ tty = port->port_tty;
+ }
+
+
+ /* We want our data queue to become empty ASAP, keeping data
+ * in the tty and ldisc (not here). If we couldn't push any
+ * this time around, there may be trouble unless there's an
+ * implicit tty_unthrottle() call on its way...
+ *
+ * REVISIT we should probably add a timer to keep the tasklet
+ * from starving ... but it's not clear that case ever happens.
+ */
+ if (!list_empty(queue) && tty) {
+ if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (do_push)
+ tasklet_schedule(&port->push);
+ else
+ pr_warning(PREFIX "%d: RX not scheduled?\n",
+ port->port_num);
+ }
+ }
+
+ /* If we're still connected, refill the USB RX queue. */
+ if (!disconnect && port->port_usb)
+ gs_start_rx(port);
+
+ spin_unlock_irq(&port->port_lock);
+}
+
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gs_port *port = ep->driver_data;
+
+ /* Queue all received data until the tty layer is ready for it. */
+ spin_lock(&port->port_lock);
+ list_add_tail(&req->list, &port->read_queue);
+ tasklet_schedule(&port->push);
spin_unlock(&port->port_lock);
}
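Editorial note: the reworked RX path leans on one TTY-layer fact used by the n_read bookkeeping above: tty_insert_flip_string() may accept fewer bytes than offered, and the driver must remember the offset and resume from it instead of dropping the tail of the packet. A minimal sketch of that idiom, with placeholder names rather than this driver's, under the 2.6.27-era tty API used here:

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Illustrative sketch only (placeholder names, not driver code):
 * tty_insert_flip_string() returns how many bytes the TTY layer took;
 * a partial accept is resumed from *offset on the next pass. */
static unsigned push_packet(struct tty_struct *tty, const unsigned char *buf,
			    unsigned len, unsigned *offset)
{
	int copied;

	copied = tty_insert_flip_string(tty, buf + *offset, len - *offset);
	*offset += copied;
	if (*offset == len)
		*offset = 0;			/* whole packet consumed */
	if (copied)
		tty_flip_buffer_push(tty);	/* hand data on to the ldisc */
	return copied;
}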
@@ -625,6 +676,7 @@ static int gs_start_io(struct gs_port *port)
}
/* queue read requests */
+ port->n_read = 0;
started = gs_start_rx(port);
/* unblock any pending writes into our circular buffer */
@@ -633,9 +685,10 @@ static int gs_start_io(struct gs_port *port)
} else {
gs_free_requests(ep, head);
gs_free_requests(port->port_usb->in, &port->write_pool);
+ status = -EIO;
}
- return started ? 0 : status;
+ return status;
}
/*-------------------------------------------------------------------------*/
@@ -736,10 +789,13 @@ static int gs_open(struct tty_struct *tty, struct file *file)
/* if connected, start the I/O stream */
if (port->port_usb) {
+ struct gserial *gser = port->port_usb;
+
pr_debug("gs_open: start ttyGS%d\n", port->port_num);
gs_start_io(port);
- /* REVISIT for ACM, issue "network connected" event */
+ if (gser->connect)
+ gser->connect(gser);
}
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -766,6 +822,7 @@ static int gs_writes_finished(struct gs_port *p)
static void gs_close(struct tty_struct *tty, struct file *file)
{
struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
spin_lock_irq(&port->port_lock);
@@ -785,32 +842,31 @@ static void gs_close(struct tty_struct *tty, struct file *file)
port->openclose = true;
port->open_count = 0;
- if (port->port_usb)
- /* REVISIT for ACM, issue "network disconnected" event */;
+ gser = port->port_usb;
+ if (gser && gser->disconnect)
+ gser->disconnect(gser);
/* wait for circular write buffer to drain, disconnect, or at
* most GS_CLOSE_TIMEOUT seconds; then discard the rest
*/
- if (gs_buf_data_avail(&port->port_write_buf) > 0
- && port->port_usb) {
+ if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
gs_writes_finished(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
}
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
* let the push tasklet fire again until we're re-opened.
*/
- if (port->port_usb == NULL)
+ if (gser == NULL)
gs_buf_free(&port->port_write_buf);
else
gs_buf_clear(&port->port_write_buf);
- tasklet_kill(&port->push);
-
tty->driver_data = NULL;
port->port_tty = NULL;
@@ -911,15 +967,35 @@ static void gs_unthrottle(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
- unsigned started = 0;
spin_lock_irqsave(&port->port_lock, flags);
- if (port->port_usb)
- started = gs_start_rx(port);
+ if (port->port_usb) {
+ /* Kickstart read queue processing. We don't do xon/xoff,
+ * rts/cts, or other handshaking with the host, but if the
+ * read queue backs up enough we'll be NAKing OUT packets.
+ */
+ tasklet_schedule(&port->push);
+ pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
+ }
spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int gs_break_ctl(struct tty_struct *tty, int duration)
+{
+ struct gs_port *port = tty->driver_data;
+ int status = 0;
+ struct gserial *gser;
+
+ pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
+ port->port_num, duration);
- pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n",
- port->port_num, started);
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+ if (gser && gser->send_break)
+ status = gser->send_break(gser, duration);
+ spin_unlock_irq(&port->port_lock);
+
+ return status;
}
static const struct tty_operations gs_tty_ops = {
@@ -931,6 +1007,7 @@ static const struct tty_operations gs_tty_ops = {
.write_room = gs_write_room,
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
+ .break_ctl = gs_break_ctl,
};
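With .break_ctl wired up, a break requested from user space now reaches the function driver's send_break() hook instead of being ignored. A quick way to exercise it from the gadget side is sketched below; the device path and the assumption that a gadget is configured and exposes /dev/ttyGS0 are mine, not part of this patch.

/* User-space check (hypothetical): tcsendbreak() ends up in the TTY core's
 * break handling, which calls the driver's break_ctl(), i.e. gs_break_ctl(). */
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	tcsendbreak(fd, 0);	/* ~0.25-0.5 s break, routed via break_ctl */
	close(fd);
	return 0;
}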
/*-------------------------------------------------------------------------*/
@@ -953,6 +1030,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
INIT_LIST_HEAD(&port->read_pool);
+ INIT_LIST_HEAD(&port->read_queue);
INIT_LIST_HEAD(&port->write_pool);
port->port_num = port_num;
@@ -997,7 +1075,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
gs_tty_driver->owner = THIS_MODULE;
gs_tty_driver->driver_name = "g_serial";
- gs_tty_driver->name = "ttyGS";
+ gs_tty_driver->name = PREFIX;
/* uses dynamically assigned dev_t values */
gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1104,6 +1182,8 @@ void gserial_cleanup(void)
ports[i].port = NULL;
mutex_unlock(&ports[i].lock);
+ tasklet_kill(&port->push);
+
/* wait for old opens to finish */
wait_event(port->close_wait, gs_closed(port));
@@ -1175,14 +1255,17 @@ int gserial_connect(struct gserial *gser, u8 port_num)
/* REVISIT if waiting on "carrier detect", signal. */
- /* REVISIT for ACM, issue "network connection" status notification:
- * connected if open_count, else disconnected.
+ /* if it's already open, start I/O ... and notify the serial
+ * protocol about open/close status (connect/disconnect).
*/
-
- /* if it's already open, start I/O */
if (port->open_count) {
pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
gs_start_io(port);
+ if (gser->connect)
+ gser->connect(gser);
+ } else {
+ if (gser->disconnect)
+ gser->disconnect(gser);
}
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1241,6 +1324,7 @@ void gserial_disconnect(struct gserial *gser)
if (port->open_count == 0 && !port->openclose)
gs_buf_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool);
+ gs_free_requests(gser->out, &port->read_queue);
gs_free_requests(gser->in, &port->write_pool);
spin_unlock_irqrestore(&port->port_lock, flags);
}
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 7b561138f90e..af3910d01aea 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -23,8 +23,7 @@
* style I/O using the USB peripheral endpoints listed here, including
* hookups to sysfs and /dev for each logical "tty" device.
*
- * REVISIT need TTY --> USB event flow too, so ACM can report open/close
- * as carrier detect events. Model after ECM. There's more ACM state too.
+ * REVISIT at least ACM could support tiocmget() if needed.
*
* REVISIT someday, allow multiplexing several TTYs over these endpoints.
*/
@@ -41,8 +40,17 @@ struct gserial {
/* REVISIT avoid this CDC-ACM support harder ... */
struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
+
+ /* notification callbacks */
+ void (*connect)(struct gserial *p);
+ void (*disconnect)(struct gserial *p);
+ int (*send_break)(struct gserial *p, int duration);
};
+/* utilities to allocate/free request and buffer */
+struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
+void gs_free_req(struct usb_ep *, struct usb_request *req);
+
/* port setup/teardown is handled by gadget driver */
int gserial_setup(struct usb_gadget *g, unsigned n_ports);
void gserial_cleanup(void);
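For context, the three new hooks are filled in by whichever function driver sits on top of u_serial before it calls gserial_connect(); u_serial itself only invokes them on open/close and break. The sketch below is hedged: the my_* names are placeholders, and only the gserial fields and gserial_connect() come from this patch.

/* Illustrative wiring of the new callbacks (placeholder names). */
static void my_connect(struct gserial *p)
{
	/* e.g. tell the host that carrier/DTR is up */
}

static void my_disconnect(struct gserial *p)
{
	/* e.g. drop carrier towards the host */
}

static int my_send_break(struct gserial *p, int duration)
{
	/* e.g. report the break condition to the host; 0 on success */
	return 0;
}

static int my_function_start(struct gserial *port, u8 port_num)
{
	port->connect		= my_connect;
	port->disconnect	= my_disconnect;
	port->send_break	= my_send_break;
	return gserial_connect(port, port_num);
}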
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 5fbdc14e63b3..5416cf969005 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
-#include <asm/plat-orion/ehci-orion.h>
+#include <plat/ehci-orion.h>
#define rdl(off) __raw_readl(hcd->regs + (off))
#define wrl(off, val) __raw_writel((val), hcd->regs + (off))
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c858f2adb929..d22a84f86a33 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -126,9 +126,8 @@ static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
* doesn't quite work because some people have to enforce 32-bit access
*/
static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
- __u32 __iomem *dst, u32 offset, u32 len)
+ __u32 __iomem *dst, u32 len)
{
- struct usb_hcd *hcd = priv_to_hcd(priv);
u32 val;
u8 *buff8;
@@ -136,11 +135,6 @@ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
return;
}
- isp1760_writel(offset, hcd->regs + HC_MEMORY_REG);
- /* XXX
- * 90nsec delay, the spec says something how this could be avoided.
- */
- mdelay(1);
while (len >= 4) {
*src = __raw_readl(dst);
@@ -987,8 +981,20 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
printk(KERN_ERR "qh is 0\n");
continue;
}
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs,
- atl_regs, sizeof(ptd));
+ isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
+ HC_MEMORY_REG);
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+ /*
+ * write bank1 address twice to ensure the 90ns delay (time
+ * between BANK0 write and the priv_read_copy() call is at
+ * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
+ */
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+
+ priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
+ ISP_BANK(0), sizeof(ptd));
dw1 = le32_to_cpu(ptd.dw1);
dw2 = le32_to_cpu(ptd.dw2);
@@ -1091,7 +1097,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
case IN_PID:
priv_read_copy(priv,
priv->atl_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload, payload,
+ usb_hcd->regs + payload + ISP_BANK(1),
length);
case OUT_PID:
@@ -1122,11 +1128,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
} else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
/* short BULK received */
- printk(KERN_ERR "short bulk, %d instead %zu\n", length,
- qtd->length);
if (urb->transfer_flags & URB_SHORT_NOT_OK) {
urb->status = -EREMOTEIO;
- printk(KERN_ERR "not okey\n");
+ isp1760_dbg(priv, "short bulk, %d instead %zu "
+ "with URB_SHORT_NOT_OK flag.\n",
+ length, qtd->length);
}
if (urb->status == -EINPROGRESS)
@@ -1206,8 +1212,20 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
continue;
}
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs,
- int_regs, sizeof(ptd));
+ isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
+ HC_MEMORY_REG);
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+ /*
+ * write bank1 address twice to ensure the 90ns delay (time
+ * between BANK0 write and the priv_read_copy() call is at
+ * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
+ */
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+
+ priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
+ ISP_BANK(0), sizeof(ptd));
dw1 = le32_to_cpu(ptd.dw1);
dw3 = le32_to_cpu(ptd.dw3);
check_int_err_status(le32_to_cpu(ptd.dw4));
@@ -1242,7 +1260,7 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
case IN_PID:
priv_read_copy(priv,
priv->int_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload , payload,
+ usb_hcd->regs + payload + ISP_BANK(1),
length);
case OUT_PID:
@@ -1615,8 +1633,7 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return -EPIPE;
}
- isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
- return 0;
+ return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
}
static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 6473dd86993c..4377277667d9 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -54,6 +54,8 @@ void deinit_kmem_cache(void);
#define BUFFER_MAP 0x7
#define HC_MEMORY_REG 0x33c
+#define ISP_BANK(x) ((x) << 16)
+
#define HC_PORT1_CTRL 0x374
#define PORT1_POWER (3 << 3)
#define PORT1_INIT1 (1 << 7)
@@ -119,6 +121,9 @@ struct inter_packet_info {
typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd);
+#define isp1760_dbg(priv, fmt, args...) \
+ dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
+
#define isp1760_info(priv, fmt, args...) \
dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
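The two isp1760-hcd.c hunks above program HC_MEMORY_REG identically before each priv_read_copy(), so the sequence could plausibly be factored into a helper. The sketch below is hypothetical and not part of this patch; the duplicated bank-1 write exists only to burn the required settle time between the bank-0 setup and the first read through that window.

/* Hypothetical helper: select PTD data in bank 0 and payload in bank 1,
 * repeating the bank-1 write so at least ~90 ns elapse before the caller
 * starts reading through bank 0. */
static void isp1760_select_banks(struct usb_hcd *hcd, u32 ptd_offset,
				 u32 payload_offset)
{
	isp1760_writel(ptd_offset + ISP_BANK(0), hcd->regs + HC_MEMORY_REG);
	isp1760_writel(payload_offset + ISP_BANK(1), hcd->regs + HC_MEMORY_REG);
	/* duplicate write: value unchanged, only the bus time matters */
	isp1760_writel(payload_offset + ISP_BANK(1), hcd->regs + HC_MEMORY_REG);
}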
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 26bc47941d01..89901962cbfd 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -86,6 +86,21 @@ static void ohci_stop (struct usb_hcd *hcd);
static int ohci_restart (struct ohci_hcd *ohci);
#endif
+#ifdef CONFIG_PCI
+static void quirk_amd_pll(int state);
+static void amd_iso_dev_put(void);
+#else
+static inline void quirk_amd_pll(int state)
+{
+ return;
+}
+static inline void amd_iso_dev_put(void)
+{
+ return;
+}
+#endif
+
+
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
@@ -483,6 +498,9 @@ static int ohci_init (struct ohci_hcd *ohci)
int ret;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
+ if (distrust_firmware)
+ ohci->flags |= OHCI_QUIRK_HUB_POWER;
+
disable (ohci);
ohci->regs = hcd->regs;
@@ -689,7 +707,8 @@ retry:
temp |= RH_A_NOCP;
temp &= ~(RH_A_POTPGT | RH_A_NPS);
ohci_writel (ohci, temp, &ohci->regs->roothub.a);
- } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
+ } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
+ (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
* Mac platforms. ganged overcurrent reporting, if any.
*/
@@ -882,6 +901,8 @@ static void ohci_stop (struct usb_hcd *hcd)
if (quirk_zfmicro(ohci))
del_timer(&ohci->unlink_watchdog);
+ if (quirk_amdiso(ohci))
+ amd_iso_dev_put();
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index b56739221d11..439beb784f3e 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -483,6 +483,13 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
length++;
}
+ /* Some broken controllers never turn off RHSC in the interrupt
+ * status register. For their sake we won't re-enable RHSC
+ * interrupts if the flag is already set.
+ */
+ if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC)
+ changed = 1;
+
/* look at each port */
for (i = 0; i < ohci->num_ports; i++) {
u32 status = roothub_portstatus (ohci, i);
@@ -572,8 +579,6 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
return 0;
}
-static void start_hnp(struct ohci_hcd *ohci);
-
#else
#define ohci_start_port_reset NULL
@@ -760,7 +765,7 @@ static int ohci_hub_control (
#ifdef CONFIG_USB_OTG
if (hcd->self.otg_port == (wIndex + 1)
&& hcd->self.b_hnp_enable)
- start_hnp(ohci);
+ ohci->start_hnp(ohci);
else
#endif
ohci_writel (ohci, RH_PS_PSS,
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 94dfca02f7e1..3d532b709670 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -225,6 +225,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
dev_err(hcd->self.controller, "can't find transceiver\n");
return -ENODEV;
}
+ ohci->start_hnp = start_hnp;
}
#endif
@@ -260,7 +261,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
omap_cfg_reg(W4_USB_HIGHZ);
}
ohci_writel(ohci, rh, &ohci->regs->roothub.a);
- distrust_firmware = 0;
+ ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
} else if (machine_is_nokia770()) {
/* We require a self-powered hub, which should have
* plenty of power. */
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 4696cc912e16..083e8df0a817 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -18,6 +18,28 @@
#error "This file is PCI bus glue. CONFIG_PCI must be defined."
#endif
+#include <linux/pci.h>
+#include <linux/io.h>
+
+
+/* constants used to work around PM-related transfer
+ * glitches in some AMD 700 series southbridges
+ */
+#define AB_REG_BAR 0xf0
+#define AB_INDX(addr) ((addr) + 0x00)
+#define AB_DATA(addr) ((addr) + 0x04)
+#define AX_INDXC 0x30
+#define AX_DATAC 0x34
+
+#define NB_PCIE_INDX_ADDR 0xe0
+#define NB_PCIE_INDX_DATA 0xe4
+#define PCIE_P_CNTL 0x10040
+#define BIF_NB 0x10002
+
+static struct pci_dev *amd_smbus_dev;
+static struct pci_dev *amd_hb_dev;
+static int amd_ohci_iso_count;
+
/*-------------------------------------------------------------------------*/
static int broken_suspend(struct usb_hcd *hcd)
@@ -143,6 +165,103 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
return 0;
}
+static int ohci_quirk_amd700(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ u8 rev = 0;
+
+ if (!amd_smbus_dev)
+ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+ PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+ if (!amd_smbus_dev)
+ return 0;
+
+ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ if ((rev > 0x3b) || (rev < 0x30)) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ return 0;
+ }
+
+ amd_ohci_iso_count++;
+
+ if (!amd_hb_dev)
+ amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL);
+
+ ohci->flags |= OHCI_QUIRK_AMD_ISO;
+ ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n");
+
+ return 0;
+}
+
+/*
+ * The hardware normally enables the A-link power management feature, which
+ * lets the system lower the power consumption in idle states.
+ *
+ * Assume the system is configured to have USB 1.1 ISO transfers going
+ * to or from a USB device. Without this quirk, that stream may stutter
+ * or have breaks occasionally. For transfers going to speakers, this
+ * makes a very audible mess...
+ *
+ * That audio playback corruption is due to the audio stream getting
+ * interrupted occasionally when the link goes into a lower power state.
+ * This USB quirk prevents the link from going into that lower power state
+ * during audio playback or other ISO operations.
+ */
+static void quirk_amd_pll(int on)
+{
+ u32 addr;
+ u32 val;
+ u32 bit = (on > 0) ? 1 : 0;
+
+ pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr);
+
+ /* BIT names/meanings are NDA-protected, sorry ... */
+
+ outl(AX_INDXC, AB_INDX(addr));
+ outl(0x40, AB_DATA(addr));
+ outl(AX_DATAC, AB_INDX(addr));
+ val = inl(AB_DATA(addr));
+ val &= ~((1 << 3) | (1 << 4) | (1 << 9));
+ val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9);
+ outl(val, AB_DATA(addr));
+
+ if (amd_hb_dev) {
+ addr = PCIE_P_CNTL;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
+
+ pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
+ val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
+ val |= bit | (bit << 3) | (bit << 12);
+ val |= ((!bit) << 4) | ((!bit) << 9);
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
+
+ addr = BIF_NB;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
+
+ pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
+ val &= ~(1 << 8);
+ val |= bit << 8;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
+ }
+}
+
+static void amd_iso_dev_put(void)
+{
+ amd_ohci_iso_count--;
+ if (amd_ohci_iso_count == 0) {
+ if (amd_smbus_dev) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ }
+ if (amd_hb_dev) {
+ pci_dev_put(amd_hb_dev);
+ amd_hb_dev = NULL;
+ }
+ }
+
+}
+
/* List of quirks for OHCI */
static const struct pci_device_id ohci_pci_quirks[] = {
{
@@ -181,6 +300,19 @@ static const struct pci_device_id ohci_pci_quirks[] = {
PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
.driver_data = (unsigned long) broken_suspend,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
*/
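As a reminder of how these table entries take effect: the dispatch code lives elsewhere in ohci-pci.c and is not shown in this hunk; driver_data carries a per-device setup hook that runs once the PCI device has been matched. Roughly, and only as a sketch rather than a literal excerpt:

/* Sketch of the quirk dispatch (not part of this patch). */
static int run_ohci_pci_quirk(struct usb_hcd *hcd, struct pci_dev *pdev)
{
	const struct pci_device_id *id = pci_match_id(ohci_pci_quirks, pdev);

	if (id && id->driver_data) {
		int (*quirk)(struct usb_hcd *) = (void *)id->driver_data;

		return quirk(hcd);	/* e.g. ohci_quirk_amd700() */
	}
	return 0;
}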
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 6a9b4c557953..c2d80f80448b 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -49,6 +49,9 @@ __acquires(ohci->lock)
switch (usb_pipetype (urb->pipe)) {
case PIPE_ISOCHRONOUS:
ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
+ if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+ && quirk_amdiso(ohci))
+ quirk_amd_pll(1);
break;
case PIPE_INTERRUPT:
ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -677,6 +680,9 @@ static void td_submit_urb (
data + urb->iso_frame_desc [cnt].offset,
urb->iso_frame_desc [cnt].length, urb, cnt);
}
+ if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+ && quirk_amdiso(ohci))
+ quirk_amd_pll(0);
periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
break;
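Taken together, the two ohci-q.c hunks keep the A-link workaround active exactly while isochronous traffic is queued: the first ISO URB submitted turns the low-power mode off, and completion of the last one turns it back on. A condensed, illustrative restatement (not literal driver code) of that invariant:

/* Illustrative only: bandwidth_isoc_reqs counts queued ISO URBs, so it is
 * zero just before the first submit and again after the last completes. */
static void amd_iso_pm_toggle(struct ohci_hcd *ohci, bool submitting)
{
	struct usb_hcd *hcd = ohci_to_hcd(ohci);

	if (!quirk_amdiso(ohci))
		return;
	if (hcd->self.bandwidth_isoc_reqs == 0)
		quirk_amd_pll(submitting ? 0 : 1);	/* 0: PM off, 1: PM on */
}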
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index dc544ddc7849..faf622eafce7 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -371,6 +371,7 @@ struct ohci_hcd {
* other external transceivers should be software-transparent
*/
struct otg_transceiver *transceiver;
+ void (*start_hnp)(struct ohci_hcd *ohci);
/*
* memory management for queue data structures
@@ -399,6 +400,8 @@ struct ohci_hcd {
#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
+#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
+#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers need AMD PLL quirk */
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
@@ -426,6 +429,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
{
return ohci->flags & OHCI_QUIRK_ZFMICRO;
}
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+ return ohci->flags & OHCI_QUIRK_AMD_ISO;
+}
#else
static inline int quirk_nec(struct ohci_hcd *ohci)
{
@@ -435,6 +442,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
{
return 0;
}
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+ return 0;
+}
#endif
/* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d5f02dddb120..ea7126f99cab 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -964,11 +964,34 @@ static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
disable_irq_nrdy(r8a66597, pipenum);
}
+static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
+{
+ mod_timer(&r8a66597->rh_timer,
+ jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
+}
+
+static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
+ int connect)
+{
+ struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+ rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
+ rh->scount = R8A66597_MAX_SAMPLING;
+ if (connect)
+ rh->port |= 1 << USB_PORT_FEAT_CONNECTION;
+ else
+ rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION);
+ rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION;
+
+ r8a66597_root_hub_start_polling(r8a66597);
+}
+
/* this function must be called with interrupt disabled */
static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
u16 syssts)
{
if (syssts == SE0) {
+ r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
return;
}
@@ -1002,13 +1025,10 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
{
struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
- r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION);
- r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION);
-
disable_r8a66597_pipe_all(r8a66597, dev);
free_usb_address(r8a66597, dev);
- r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
+ start_root_hub_sampling(r8a66597, port, 0);
}
/* this function must be called with interrupt disabled */
@@ -1551,23 +1571,6 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
}
}
-static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
-{
- mod_timer(&r8a66597->rh_timer,
- jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
-}
-
-static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port)
-{
- struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
-
- rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
- rh->scount = R8A66597_MAX_SAMPLING;
- r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION)
- | (1 << USB_PORT_FEAT_C_CONNECTION);
- r8a66597_root_hub_start_polling(r8a66597);
-}
-
static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
@@ -1594,7 +1597,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
/* start usb bus sampling */
- start_root_hub_sampling(r8a66597, 1);
+ start_root_hub_sampling(r8a66597, 1, 1);
}
if (mask2 & DTCH) {
r8a66597_write(r8a66597, ~DTCH, INTSTS2);
@@ -1609,7 +1612,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
/* start usb bus sampling */
- start_root_hub_sampling(r8a66597, 0);
+ start_root_hub_sampling(r8a66597, 0, 1);
}
if (mask1 & DTCH) {
r8a66597_write(r8a66597, ~DTCH, INTSTS1);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 001789c9a11a..4ea50e0abcbb 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -42,16 +42,6 @@ config USB_ADUTUX
To compile this driver as a module, choose M here. The module
will be called adutux.
-config USB_AUERSWALD
- tristate "USB Auerswald ISDN support"
- depends on USB
- help
- Say Y here if you want to connect an Auerswald USB ISDN Device
- to your computer's USB port.
-
- To compile this driver as a module, choose M here: the
- module will be called auerswald.
-
config USB_RIO500
tristate "USB Diamond Rio500 support"
depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index aba091cb5ec0..45b4e12afb08 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_USB_ADUTUX) += adutux.o
obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
-obj-$(CONFIG_USB_AUERSWALD) += auerswald.o
obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
obj-$(CONFIG_USB_CYTHERM) += cytherm.o
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
deleted file mode 100644
index d2f61d5510e7..000000000000
--- a/drivers/usb/misc/auerswald.c
+++ /dev/null
@@ -1,2152 +0,0 @@
-/*****************************************************************************/
-/*
- * auerswald.c -- Auerswald PBX/System Telephone usb driver.
- *
- * Copyright (C) 2001 Wolfgang Mües (wolfgang@iksw-muees.de)
- *
- * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl)
- * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
- /*****************************************************************************/
-
-/* Standard Linux module include files */
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/usb.h>
-#include <linux/mutex.h>
-
-/*-------------------------------------------------------------------*/
-/* Debug support */
-#ifdef DEBUG
-#define dump( adr, len) \
-do { \
- unsigned int u; \
- printk (KERN_DEBUG); \
- for (u = 0; u < len; u++) \
- printk (" %02X", adr[u] & 0xFF); \
- printk ("\n"); \
-} while (0)
-#else
-#define dump( adr, len)
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Version Information */
-#define DRIVER_VERSION "0.9.11"
-#define DRIVER_AUTHOR "Wolfgang Mües <wolfgang@iksw-muees.de>"
-#define DRIVER_DESC "Auerswald PBX/System Telephone usb driver"
-
-/*-------------------------------------------------------------------*/
-/* Private declarations for Auerswald USB driver */
-
-/* Auerswald Vendor ID */
-#define ID_AUERSWALD 0x09BF
-
-#define AUER_MINOR_BASE 112 /* auerswald driver minor number */
-
-/* we can have up to this number of device plugged in at once */
-#define AUER_MAX_DEVICES 16
-
-
-/* Number of read buffers for each device */
-#define AU_RBUFFERS 10
-
-/* Number of chain elements for each control chain */
-#define AUCH_ELEMENTS 20
-
-/* Number of retries in communication */
-#define AU_RETRIES 10
-
-/*-------------------------------------------------------------------*/
-/* vendor specific protocol */
-/* Header Byte */
-#define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */
-#define AUH_DIRECT 0x00 /* data is for USB device */
-#define AUH_INDIRECT 0x80 /* USB device is relay */
-
-#define AUH_SPLITMASK 0x40 /* mask for split bit */
-#define AUH_UNSPLIT 0x00 /* data block is full-size */
-#define AUH_SPLIT 0x40 /* data block is part of a larger one,
- split-byte follows */
-
-#define AUH_TYPEMASK 0x3F /* mask for type of data transfer */
-#define AUH_TYPESIZE 0x40 /* different types */
-#define AUH_DCHANNEL 0x00 /* D channel data */
-#define AUH_B1CHANNEL 0x01 /* B1 channel transparent */
-#define AUH_B2CHANNEL 0x02 /* B2 channel transparent */
-/* 0x03..0x0F reserved for driver internal use */
-#define AUH_COMMAND 0x10 /* Command channel */
-#define AUH_BPROT 0x11 /* Configuration block protocol */
-#define AUH_DPROTANA 0x12 /* D channel protocol analyzer */
-#define AUH_TAPI 0x13 /* telephone api data (ATD) */
-/* 0x14..0x3F reserved for other protocols */
-#define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */
-#define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */
-
-#define AUH_SIZE 1 /* Size of Header Byte */
-
-/* Split Byte. Only present if split bit in header byte set.*/
-#define AUS_STARTMASK 0x80 /* mask for first block of splitted frame */
-#define AUS_FIRST 0x80 /* first block */
-#define AUS_FOLLOW 0x00 /* following block */
-
-#define AUS_ENDMASK 0x40 /* mask for last block of splitted frame */
-#define AUS_END 0x40 /* last block */
-#define AUS_NOEND 0x00 /* not the last block */
-
-#define AUS_LENMASK 0x3F /* mask for block length information */
-
-/* Request types */
-#define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */
-#define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */
-
-/* Vendor Requests */
-#define AUV_GETINFO 0x00 /* GetDeviceInfo */
-#define AUV_WBLOCK 0x01 /* Write Block */
-#define AUV_RBLOCK 0x02 /* Read Block */
-#define AUV_CHANNELCTL 0x03 /* Channel Control */
-#define AUV_DUMMY 0x04 /* Dummy Out for retry */
-
-/* Device Info Types */
-#define AUDI_NUMBCH 0x0000 /* Number of supported B channels */
-#define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */
-#define AUDI_MBCTRANS 0x0002 /* max. Blocklength of control transfer */
-
-/* Interrupt endpoint definitions */
-#define AU_IRQENDP 1 /* Endpoint number */
-#define AU_IRQCMDID 16 /* Command-block ID */
-#define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */
-#define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */
-
-/* Device String Descriptors */
-#define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */
-#define AUSI_DEVICE 2 /* Name of the Device */
-#define AUSI_SERIALNR 3 /* Serial Number */
-#define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */
-
-#define AUSI_DLEN 100 /* Max. Length of Device Description */
-
-#define AUV_RETRY 0x101 /* First Firmware version which can do control retries */
-
-/*-------------------------------------------------------------------*/
-/* External data structures / Interface */
-typedef struct
-{
- char __user *buf; /* return buffer for string contents */
- unsigned int bsize; /* size of return buffer */
-} audevinfo_t,*paudevinfo_t;
-
-/* IO controls */
-#define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */
-#define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */
-#define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */
-#define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */
-#define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */
-#define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */
-#define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmitt channel ready to send */
-/* 'U' 0xF7..0xFF reseved */
-
-/*-------------------------------------------------------------------*/
-/* Internal data structures */
-
-/* ..................................................................*/
-/* urb chain element */
-struct auerchain; /* forward for circular reference */
-typedef struct
-{
- struct auerchain *chain; /* pointer to the chain to which this element belongs */
- struct urb * urbp; /* pointer to attached urb */
- void *context; /* saved URB context */
- usb_complete_t complete; /* saved URB completion function */
- struct list_head list; /* to include element into a list */
-} auerchainelement_t,*pauerchainelement_t;
-
-/* urb chain */
-typedef struct auerchain
-{
- pauerchainelement_t active; /* element which is submitted to urb */
- spinlock_t lock; /* protection agains interrupts */
- struct list_head waiting_list; /* list of waiting elements */
- struct list_head free_list; /* list of available elements */
-} auerchain_t,*pauerchain_t;
-
-/* urb blocking completion helper struct */
-typedef struct
-{
- wait_queue_head_t wqh; /* wait for completion */
- unsigned int done; /* completion flag */
-} auerchain_chs_t,*pauerchain_chs_t;
-
-/* ...................................................................*/
-/* buffer element */
-struct auerbufctl; /* forward */
-typedef struct
-{
- char *bufp; /* reference to allocated data buffer */
- unsigned int len; /* number of characters in data buffer */
- unsigned int retries; /* for urb retries */
- struct usb_ctrlrequest *dr; /* for setup data in control messages */
- struct urb * urbp; /* USB urb */
- struct auerbufctl *list; /* pointer to list */
- struct list_head buff_list; /* reference to next buffer in list */
-} auerbuf_t,*pauerbuf_t;
-
-/* buffer list control block */
-typedef struct auerbufctl
-{
- spinlock_t lock; /* protection in interrupt */
- struct list_head free_buff_list;/* free buffers */
- struct list_head rec_buff_list; /* buffers with receive data */
-} auerbufctl_t,*pauerbufctl_t;
-
-/* ...................................................................*/
-/* service context */
-struct auerscon; /* forward */
-typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t);
-typedef void (*auer_disconn_t) (struct auerscon*);
-typedef struct auerscon
-{
- unsigned int id; /* protocol service id AUH_xxxx */
- auer_dispatch_t dispatch; /* dispatch read buffer */
- auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */
-} auerscon_t,*pauerscon_t;
-
-/* ...................................................................*/
-/* USB device context */
-typedef struct
-{
- struct mutex mutex; /* protection in user context */
- char name[20]; /* name of the /dev/usb entry */
- unsigned int dtindex; /* index in the device table */
- struct usb_device * usbdev; /* USB device handle */
- int open_count; /* count the number of open character channels */
- char dev_desc[AUSI_DLEN];/* for storing a textual description */
- unsigned int maxControlLength; /* max. Length of control paket (without header) */
- struct urb * inturbp; /* interrupt urb */
- char * intbufp; /* data buffer for interrupt urb */
- unsigned int irqsize; /* size of interrupt endpoint 1 */
- struct auerchain controlchain; /* for chaining of control messages */
- auerbufctl_t bufctl; /* Buffer control for control transfers */
- pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */
- unsigned int version; /* Version of the device */
- wait_queue_head_t bufferwait; /* wait for a control buffer */
-} auerswald_t,*pauerswald_t;
-
-/* ................................................................... */
-/* character device context */
-typedef struct
-{
- struct mutex mutex; /* protection in user context */
- pauerswald_t auerdev; /* context pointer of assigned device */
- auerbufctl_t bufctl; /* controls the buffer chain */
- auerscon_t scontext; /* service context */
- wait_queue_head_t readwait; /* for synchronous reading */
- struct mutex readmutex; /* protection against multiple reads */
- pauerbuf_t readbuf; /* buffer held for partial reading */
- unsigned int readoffset; /* current offset in readbuf */
- unsigned int removed; /* is != 0 if device is removed */
-} auerchar_t,*pauerchar_t;
-
-
-/*-------------------------------------------------------------------*/
-/* Forwards */
-static void auerswald_ctrlread_complete (struct urb * urb);
-static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp);
-static struct usb_driver auerswald_driver;
-
-
-/*-------------------------------------------------------------------*/
-/* USB chain helper functions */
-/* -------------------------- */
-
-/* completion function for chained urbs */
-static void auerchain_complete (struct urb * urb)
-{
- unsigned long flags;
- int result;
-
- /* get pointer to element and to chain */
- pauerchainelement_t acep = urb->context;
- pauerchain_t acp = acep->chain;
-
- /* restore original entries in urb */
- urb->context = acep->context;
- urb->complete = acep->complete;
-
- dbg ("auerchain_complete called");
-
- /* call original completion function
- NOTE: this function may lead to more urbs submitted into the chain.
- (no chain lock at calling complete()!)
- acp->active != NULL is protecting us against recursion.*/
- urb->complete (urb);
-
- /* detach element from chain data structure */
- spin_lock_irqsave (&acp->lock, flags);
- if (acp->active != acep) /* paranoia debug check */
- dbg ("auerchain_complete: completion on non-active element called!");
- else
- acp->active = NULL;
-
- /* add the used chain element to the list of free elements */
- list_add_tail (&acep->list, &acp->free_list);
- acep = NULL;
-
- /* is there a new element waiting in the chain? */
- if (!acp->active && !list_empty (&acp->waiting_list)) {
- /* yes: get the entry */
- struct list_head *tmp = acp->waiting_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- acp->active = acep;
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* submit the new urb */
- if (acep) {
- urb = acep->urbp;
- dbg ("auerchain_complete: submitting next urb from chain");
- urb->status = 0; /* needed! */
- result = usb_submit_urb(urb, GFP_ATOMIC);
-
- /* check for submit errors */
- if (result) {
- urb->status = result;
- dbg("auerchain_complete: usb_submit_urb with error code %d", result);
- /* and do error handling via *this* completion function (recursive) */
- auerchain_complete( urb);
- }
- } else {
- /* simple return without submitting a new urb.
- The empty chain is detected with acp->active == NULL. */
- };
-}
-
-
-/* submit function for chained urbs
- this function may be called from completion context or from user space!
- early = 1 -> submit in front of chain
-*/
-static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early)
-{
- int result;
- unsigned long flags;
- pauerchainelement_t acep = NULL;
-
- dbg ("auerchain_submit_urb called");
-
- /* try to get a chain element */
- spin_lock_irqsave (&acp->lock, flags);
- if (!list_empty (&acp->free_list)) {
- /* yes: get the entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* if no chain element available: return with error */
- if (!acep) {
- return -ENOMEM;
- }
-
- /* fill in the new chain element values */
- acep->chain = acp;
- acep->context = urb->context;
- acep->complete = urb->complete;
- acep->urbp = urb;
- INIT_LIST_HEAD (&acep->list);
-
- /* modify urb */
- urb->context = acep;
- urb->complete = auerchain_complete;
- urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */
-
- /* add element to chain - or start it immediately */
- spin_lock_irqsave (&acp->lock, flags);
- if (acp->active) {
- /* there is traffic in the chain, simple add element to chain */
- if (early) {
- dbg ("adding new urb to head of chain");
- list_add (&acep->list, &acp->waiting_list);
- } else {
- dbg ("adding new urb to end of chain");
- list_add_tail (&acep->list, &acp->waiting_list);
- }
- acep = NULL;
- } else {
- /* the chain is empty. Prepare restart */
- acp->active = acep;
- }
- /* Spin has to be removed before usb_submit_urb! */
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* Submit urb if immediate restart */
- if (acep) {
- dbg("submitting urb immediate");
- urb->status = 0; /* needed! */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- /* check for submit errors */
- if (result) {
- urb->status = result;
- dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result);
- /* and do error handling via completion function */
- auerchain_complete( urb);
- }
- }
-
- return 0;
-}
-
-/* submit function for chained urbs
- this function may be called from completion context or from user space!
-*/
-static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb)
-{
- return auerchain_submit_urb_list (acp, urb, 0);
-}
-
-/* cancel an urb which is submitted to the chain
- the result is 0 if the urb is cancelled, or -EINPROGRESS if
- the function is successfully started.
-*/
-static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb)
-{
- unsigned long flags;
- struct urb * urbp;
- pauerchainelement_t acep;
- struct list_head *tmp;
-
- dbg ("auerchain_unlink_urb called");
-
- /* search the chain of waiting elements */
- spin_lock_irqsave (&acp->lock, flags);
- list_for_each (tmp, &acp->waiting_list) {
- acep = list_entry (tmp, auerchainelement_t, list);
- if (acep->urbp == urb) {
- list_del (tmp);
- urb->context = acep->context;
- urb->complete = acep->complete;
- list_add_tail (&acep->list, &acp->free_list);
- spin_unlock_irqrestore (&acp->lock, flags);
- dbg ("unlink waiting urb");
- urb->status = -ENOENT;
- urb->complete (urb);
- return 0;
- }
- }
- /* not found. */
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* get the active urb */
- acep = acp->active;
- if (acep) {
- urbp = acep->urbp;
-
- /* check if we have to cancel the active urb */
- if (urbp == urb) {
- /* note that there is a race condition between the check above
- and the unlink() call because of no lock. This race is harmless,
- because the usb module will detect the unlink() after completion.
- We can't use the acp->lock here because the completion function
- wants to grab it.
- */
- dbg ("unlink active urb");
- return usb_unlink_urb (urbp);
- }
- }
-
- /* not found anyway
- ... is some kind of success
- */
- dbg ("urb to unlink not found in chain");
- return 0;
-}
-
-/* cancel all urbs which are in the chain.
- this function must not be called from interrupt or completion handler.
-*/
-static void auerchain_unlink_all (pauerchain_t acp)
-{
- unsigned long flags;
- struct urb * urbp;
- pauerchainelement_t acep;
-
- dbg ("auerchain_unlink_all called");
-
- /* clear the chain of waiting elements */
- spin_lock_irqsave (&acp->lock, flags);
- while (!list_empty (&acp->waiting_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->waiting_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- urbp = acep->urbp;
- urbp->context = acep->context;
- urbp->complete = acep->complete;
- list_add_tail (&acep->list, &acp->free_list);
- spin_unlock_irqrestore (&acp->lock, flags);
- dbg ("unlink waiting urb");
- urbp->status = -ENOENT;
- urbp->complete (urbp);
- spin_lock_irqsave (&acp->lock, flags);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* clear the active urb */
- acep = acp->active;
- if (acep) {
- urbp = acep->urbp;
- dbg ("unlink active urb");
- usb_kill_urb (urbp);
- }
-}
-
-
-/* free the chain.
- this function must not be called from interrupt or completion handler.
-*/
-static void auerchain_free (pauerchain_t acp)
-{
- unsigned long flags;
- pauerchainelement_t acep;
-
- dbg ("auerchain_free called");
-
- /* first, cancel all pending urbs */
- auerchain_unlink_all (acp);
-
- /* free the elements */
- spin_lock_irqsave (&acp->lock, flags);
- while (!list_empty (&acp->free_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- spin_unlock_irqrestore (&acp->lock, flags);
- acep = list_entry (tmp, auerchainelement_t, list);
- kfree (acep);
- spin_lock_irqsave (&acp->lock, flags);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-}
-
-
-/* Init the chain control structure */
-static void auerchain_init (pauerchain_t acp)
-{
- /* init the chain data structure */
- acp->active = NULL;
- spin_lock_init (&acp->lock);
- INIT_LIST_HEAD (&acp->waiting_list);
- INIT_LIST_HEAD (&acp->free_list);
-}
-
-/* setup a chain.
- It is assumed that there is no concurrency while setting up the chain
- requirement: auerchain_init()
-*/
-static int auerchain_setup (pauerchain_t acp, unsigned int numElements)
-{
- pauerchainelement_t acep;
-
- dbg ("auerchain_setup called with %d elements", numElements);
-
- /* fill the list of free elements */
- for (;numElements; numElements--) {
- acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL);
- if (!acep)
- goto ac_fail;
- INIT_LIST_HEAD (&acep->list);
- list_add_tail (&acep->list, &acp->free_list);
- }
- return 0;
-
-ac_fail:/* free the elements */
- while (!list_empty (&acp->free_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- kfree (acep);
- }
- return -ENOMEM;
-}
-
-
-/* completion handler for synchronous chained URBs */
-static void auerchain_blocking_completion (struct urb *urb)
-{
- pauerchain_chs_t pchs = urb->context;
- pchs->done = 1;
- wmb();
- wake_up (&pchs->wqh);
-}
-
-
-/* Starts chained urb and waits for completion or timeout */
-static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length)
-{
- auerchain_chs_t chs;
- int status;
-
- dbg ("auerchain_start_wait_urb called");
- init_waitqueue_head (&chs.wqh);
- chs.done = 0;
-
- urb->context = &chs;
- status = auerchain_submit_urb (acp, urb);
- if (status)
- /* something went wrong */
- return status;
-
- timeout = wait_event_timeout(chs.wqh, chs.done, timeout);
-
- if (!timeout && !chs.done) {
- if (urb->status != -EINPROGRESS) { /* No callback?!! */
- dbg ("auerchain_start_wait_urb: raced timeout");
- status = urb->status;
- } else {
- dbg ("auerchain_start_wait_urb: timeout");
- auerchain_unlink_urb (acp, urb); /* remove urb safely */
- status = -ETIMEDOUT;
- }
- } else
- status = urb->status;
-
- if (status >= 0)
- *actual_length = urb->actual_length;
-
- return status;
-}
-
-
-/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion
- acp: pointer to the auerchain
- dev: pointer to the usb device to send the message to
- pipe: endpoint "pipe" to send the message to
- request: USB message request value
- requesttype: USB message request type value
- value: USB message value
- index: USB message index value
- data: pointer to the data to send
- size: length in bytes of the data to send
- timeout: time to wait for the message to complete before timing out (if 0 the wait is forever)
-
- This function sends a simple control message to a specified endpoint
- and waits for the message to complete, or timeout.
-
- If successful, it returns the transferred length, otherwise a negative error number.
-
- Don't use this function from within an interrupt context, like a
- bottom half handler. If you need an asynchronous message, or need to send
- a message from within interrupt context, use auerchain_submit_urb()
-*/
-static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
- __u16 value, __u16 index, void *data, __u16 size, int timeout)
-{
- int ret;
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- int uninitialized_var(length);
-
- dbg ("auerchain_control_msg");
- dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
- urb = usb_alloc_urb (0, GFP_KERNEL);
- if (!urb) {
- kfree (dr);
- return -ENOMEM;
- }
-
- dr->bRequestType = requesttype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16 (value);
- dr->wIndex = cpu_to_le16 (index);
- dr->wLength = cpu_to_le16 (size);
-
- usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */
- auerchain_blocking_completion, NULL);
- ret = auerchain_start_wait_urb (acp, urb, timeout, &length);
-
- usb_free_urb (urb);
- kfree (dr);
-
- if (ret < 0)
- return ret;
- else
- return length;
-}
-
-
-/*-------------------------------------------------------------------*/
-/* Buffer List helper functions */
-
-/* free a single auerbuf */
-static void auerbuf_free (pauerbuf_t bp)
-{
- kfree(bp->bufp);
- kfree(bp->dr);
- usb_free_urb(bp->urbp);
- kfree(bp);
-}
-
-/* free the buffers from an auerbuf list */
-static void auerbuf_free_list (struct list_head *q)
-{
- struct list_head *tmp;
- struct list_head *p;
- pauerbuf_t bp;
-
- dbg ("auerbuf_free_list");
- for (p = q->next; p != q;) {
- bp = list_entry (p, auerbuf_t, buff_list);
- tmp = p->next;
- list_del (p);
- p = tmp;
- auerbuf_free (bp);
- }
-}
-
-/* init the members of a list control block */
-static void auerbuf_init (pauerbufctl_t bcp)
-{
- dbg ("auerbuf_init");
- spin_lock_init (&bcp->lock);
- INIT_LIST_HEAD (&bcp->free_buff_list);
- INIT_LIST_HEAD (&bcp->rec_buff_list);
-}
-
-/* free all buffers from an auerbuf chain */
-static void auerbuf_free_buffers (pauerbufctl_t bcp)
-{
- unsigned long flags;
- dbg ("auerbuf_free_buffers");
-
- spin_lock_irqsave (&bcp->lock, flags);
-
- auerbuf_free_list (&bcp->free_buff_list);
- auerbuf_free_list (&bcp->rec_buff_list);
-
- spin_unlock_irqrestore (&bcp->lock, flags);
-}
-
-/* setup a list of buffers */
-/* requirement: auerbuf_init() */
-static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize)
-{
- pauerbuf_t bep = NULL;
-
- dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize);
-
- /* fill the list of free elements */
- for (;numElements; numElements--) {
- bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL);
- if (!bep)
- goto bl_fail;
- bep->list = bcp;
- INIT_LIST_HEAD (&bep->buff_list);
- bep->bufp = kmalloc (bufsize, GFP_KERNEL);
- if (!bep->bufp)
- goto bl_fail;
- bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL);
- if (!bep->dr)
- goto bl_fail;
- bep->urbp = usb_alloc_urb (0, GFP_KERNEL);
- if (!bep->urbp)
- goto bl_fail;
- list_add_tail (&bep->buff_list, &bcp->free_buff_list);
- }
- return 0;
-
-bl_fail:/* not enough memory. Free allocated elements */
- dbg ("auerbuf_setup: no more memory");
- auerbuf_free(bep);
- auerbuf_free_buffers (bcp);
- return -ENOMEM;
-}
-
-/* insert a used buffer into the free list */
-static void auerbuf_releasebuf( pauerbuf_t bp)
-{
- unsigned long flags;
- pauerbufctl_t bcp = bp->list;
- bp->retries = 0;
-
- dbg ("auerbuf_releasebuf called");
- spin_lock_irqsave (&bcp->lock, flags);
- list_add_tail (&bp->buff_list, &bcp->free_buff_list);
- spin_unlock_irqrestore (&bcp->lock, flags);
-}
-
-
-/*-------------------------------------------------------------------*/
-/* Completion handlers */
-
-/* Values of urb->status or results of usb_submit_urb():
-0 Initial, OK
--EINPROGRESS during submission until end
--ENOENT if urb is unlinked
--ETIME Device did not respond
--ENOMEM Memory Overflow
--ENODEV Specified USB-device or bus doesn't exist
--ENXIO URB already queued
--EINVAL a) Invalid transfer type specified (or not supported)
- b) Invalid interrupt interval (0n256)
--EAGAIN a) Specified ISO start frame too early
- b) (using ISO-ASAP) Too much scheduled for the future wait some time and try again.
--EFBIG Too much ISO frames requested (currently uhci900)
--EPIPE Specified pipe-handle/Endpoint is already stalled
--EMSGSIZE Endpoint message size is zero, do interface/alternate setting
--EPROTO a) Bitstuff error
- b) Unknown USB error
--EILSEQ CRC mismatch
--ENOSR Buffer error
--EREMOTEIO Short packet detected
--EXDEV ISO transfer only partially completed look at individual frame status for details
--EINVAL ISO madness, if this happens: Log off and go home
--EOVERFLOW babble
-*/
-
-/* check if a status code allows a retry */
-static int auerswald_status_retry (int status)
-{
- switch (status) {
- case 0:
- case -ETIME:
- case -EOVERFLOW:
- case -EAGAIN:
- case -EPIPE:
- case -EPROTO:
- case -EILSEQ:
- case -ENOSR:
- case -EREMOTEIO:
- return 1; /* do a retry */
- }
- return 0; /* no retry possible */
-}
-
-/* Completion of asynchronous write block */
-static void auerchar_ctrlwrite_complete (struct urb * urb)
-{
- pauerbuf_t bp = urb->context;
- pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
- dbg ("auerchar_ctrlwrite_complete called");
-
- /* reuse the buffer */
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-}
-
-/* Completion handler for dummy retry packet */
-static void auerswald_ctrlread_wretcomplete (struct urb * urb)
-{
- pauerbuf_t bp = urb->context;
- pauerswald_t cp;
- int ret;
- int status = urb->status;
-
- dbg ("auerswald_ctrlread_wretcomplete called");
- dbg ("complete with status: %d", status);
- cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-
- /* check if it is possible to advance */
- if (!auerswald_status_retry(status) || !cp->usbdev) {
- /* reuse the buffer */
- err ("control dummy: transmission error %d, can not retry", status);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- return;
- }
-
- /* fill the control message */
- bp->dr->bRequestType = AUT_RREQ;
- bp->dr->bRequest = AUV_RBLOCK;
- bp->dr->wLength = bp->dr->wValue; /* temporary stored */
- bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */
- /* bp->dr->index = channel id; remains */
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
- auerswald_ctrlread_complete,bp);
-
- /* submit the control msg as next paket */
- ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
- if (ret) {
- dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_complete (bp->urbp);
- }
-}
-
-/* completion handler for receiving of control messages */
-static void auerswald_ctrlread_complete (struct urb * urb)
-{
- unsigned int serviceid;
- pauerswald_t cp;
- pauerscon_t scp;
- pauerbuf_t bp = urb->context;
- int status = urb->status;
- int ret;
-
- dbg ("auerswald_ctrlread_complete called");
-
- cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-
- /* check if there is valid data in this urb */
- if (status) {
- dbg ("complete with non-zero status: %d", status);
- /* should we do a retry? */
- if (!auerswald_status_retry(status)
- || !cp->usbdev
- || (cp->version < AUV_RETRY)
- || (bp->retries >= AU_RETRIES)) {
- /* reuse the buffer */
- err ("control read: transmission error %d, can not retry", status);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- return;
- }
- bp->retries++;
- dbg ("Retry count = %d", bp->retries);
- /* send a long dummy control-write-message to allow device firmware to react */
- bp->dr->bRequestType = AUT_WREQ;
- bp->dr->bRequest = AUV_DUMMY;
- bp->dr->wValue = bp->dr->wLength; /* temporary storage */
- /* bp->dr->wIndex: channel ID remains unchanged */
- bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, 32,
- auerswald_ctrlread_wretcomplete,bp);
-
- /* submit the control msg as the next packet */
- ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
- if (ret) {
- dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_wretcomplete (bp->urbp);
- }
- return;
- }
-
- /* get the actual bytecount (including the header byte) */
- bp->len = urb->actual_length;
- serviceid = bp->bufp[0] & AUH_TYPEMASK;
- dbg ("Packet with serviceid %d and %d bytes received", serviceid, bp->len);
-
- /* dispatch the packet */
- scp = cp->services[serviceid];
- if (scp) {
- /* look, Ma, a listener! */
- scp->dispatch (scp, bp);
- }
-
- /* release the packet */
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-}
-
-/*-------------------------------------------------------------------*/
-/* Handling of Interrupt Endpoint */
-/* This interrupt Endpoint is used to inform the host about waiting
- messages from the USB device.
-*/
-/* int completion handler. */
-static void auerswald_int_complete (struct urb * urb)
-{
- unsigned long flags;
- unsigned int channelid;
- unsigned int bytecount;
- int ret;
- int status = urb->status;
- pauerbuf_t bp = NULL;
- pauerswald_t cp = urb->context;
-
- dbg ("%s called", __func__);
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__, status);
- return;
- default:
- dbg("%s - nonzero urb status received: %d", __func__, status);
- goto exit;
- }
-
- /* check if all needed data was received */
- if (urb->actual_length < AU_IRQMINSIZE) {
- dbg ("invalid data length received: %d bytes", urb->actual_length);
- goto exit;
- }
-
- /* check the command code */
- if (cp->intbufp[0] != AU_IRQCMDID) {
- dbg ("invalid command received: %d", cp->intbufp[0]);
- goto exit;
- }
-
- /* check the command type */
- if (cp->intbufp[1] != AU_BLOCKRDY) {
- dbg ("invalid command type received: %d", cp->intbufp[1]);
- goto exit;
- }
-
- /* now extract the information */
- channelid = cp->intbufp[2];
- bytecount = (unsigned char)cp->intbufp[3];
- bytecount |= (unsigned char)cp->intbufp[4] << 8;
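- /* the two length bytes arrive little-endian; this is essentially
-    get_unaligned_le16(&cp->intbufp[3]) spelled out by hand */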
-
- /* check the channel id */
- if (channelid >= AUH_TYPESIZE) {
- dbg ("invalid channel id received: %d", channelid);
- goto exit;
- }
-
- /* check the byte count */
- if (bytecount > (cp->maxControlLength+AUH_SIZE)) {
- dbg ("invalid byte count received: %d", bytecount);
- goto exit;
- }
- dbg ("Service Channel = %d", channelid);
- dbg ("Byte Count = %d", bytecount);
-
- /* get a buffer for the next data packet */
- spin_lock_irqsave (&cp->bufctl.lock, flags);
- if (!list_empty (&cp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = cp->bufctl.free_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&cp->bufctl.lock, flags);
-
- /* if no buffer available: skip it */
- if (!bp) {
- dbg ("auerswald_int_complete: no data buffer available");
- /* can we do something more?
- This is a big problem: if this int packet is ignored, the
- device will wait forever and not signal any more data.
- The only real solution is: having enough buffers!
- Or perhaps temporarily disabling the int endpoint?
- */
- goto exit;
- }
-
- /* fill the control message */
- bp->dr->bRequestType = AUT_RREQ;
- bp->dr->bRequest = AUV_RBLOCK;
- bp->dr->wValue = cpu_to_le16 (0);
- bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
- bp->dr->wLength = cpu_to_le16 (bytecount);
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, bytecount,
- auerswald_ctrlread_complete,bp);
-
- /* submit the control msg */
- ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
- if (ret) {
- dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_complete( bp->urbp);
- /* the same problem as above applies here: the device may lock up! */
- }
-exit:
- ret = usb_submit_urb (urb, GFP_ATOMIC);
- if (ret)
- err ("%s - usb_submit_urb failed with result %d",
- __func__, ret);
-}
-
-/* int memory deallocation
- NOTE: no mutex please!
-*/
-static void auerswald_int_free (pauerswald_t cp)
-{
- if (cp->inturbp) {
- usb_free_urb(cp->inturbp);
- cp->inturbp = NULL;
- }
- kfree(cp->intbufp);
- cp->intbufp = NULL;
-}
-
-/* This function is called to activate the interrupt
- endpoint. It returns 0 on success or a negative error code.
- NOTE: no mutex please!
-*/
-static int auerswald_int_open (pauerswald_t cp)
-{
- int ret;
- struct usb_host_endpoint *ep;
- int irqsize;
- dbg ("auerswald_int_open");
-
- ep = cp->usbdev->ep_in[AU_IRQENDP];
- if (!ep) {
- ret = -EFAULT;
- goto intoend;
- }
- irqsize = le16_to_cpu(ep->desc.wMaxPacketSize);
- cp->irqsize = irqsize;
-
- /* allocate the urb and data buffer */
- if (!cp->inturbp) {
- cp->inturbp = usb_alloc_urb (0, GFP_KERNEL);
- if (!cp->inturbp) {
- ret = -ENOMEM;
- goto intoend;
- }
- }
- if (!cp->intbufp) {
- cp->intbufp = kmalloc (irqsize, GFP_KERNEL);
- if (!cp->intbufp) {
- ret = -ENOMEM;
- goto intoend;
- }
- }
- /* setup urb */
- usb_fill_int_urb (cp->inturbp, cp->usbdev,
- usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp,
- irqsize, auerswald_int_complete, cp, ep->desc.bInterval);
- /* start the urb */
- cp->inturbp->status = 0; /* needed! */
- ret = usb_submit_urb (cp->inturbp, GFP_KERNEL);
-
-intoend:
- if (ret < 0) {
- /* activation of interrupt endpoint has failed. Now clean up. */
- dbg ("auerswald_int_open: activation of int endpoint failed");
-
- /* deallocate memory */
- auerswald_int_free (cp);
- }
- return ret;
-}
-
-/* This function is called to deactivate the interrupt
- endpoint.
- NOTE: no mutex please!
-*/
-static void auerswald_int_release (pauerswald_t cp)
-{
- dbg ("auerswald_int_release");
-
- /* stop the int endpoint */
- usb_kill_urb (cp->inturbp);
-
- /* deallocate memory */
- auerswald_int_free (cp);
-}
-
-/* --------------------------------------------------------------------- */
-/* Helper functions */
-
-/* mark the character device as removed and wake up waiting readers */
-static void auerchar_disconnect (pauerscon_t scp)
-{
- pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
- dbg ("auerchar_disconnect called");
- ccp->removed = 1;
- wake_up (&ccp->readwait);
-}
-
-
-/* dispatch a read packet to a waiting character device */
-static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp)
-{
- unsigned long flags;
- pauerchar_t ccp;
- pauerbuf_t newbp = NULL;
- char * charp;
- dbg ("auerchar_ctrlread_dispatch called");
- ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
-
- /* get a read buffer from character device context */
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- if (!list_empty (&ccp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = ccp->bufctl.free_buff_list.next;
- list_del (tmp);
- newbp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-
- if (!newbp) {
- dbg ("No read buffer available, discard paket!");
- return; /* no buffer, no dispatch */
- }
-
- /* hand the data over to the new buffer element by swapping
- the buffer pointers (all buffers have the same length) */
- charp = newbp->bufp;
- newbp->bufp = bp->bufp;
- bp->bufp = charp;
- newbp->len = bp->len;
-
- /* insert new buffer in read list */
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list);
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
- dbg ("read buffer appended to rec_list");
-
- /* wake up pending synchronous reads */
- wake_up (&ccp->readwait);
-}
-
-
-/* Delete an auerswald driver context */
-static void auerswald_delete( pauerswald_t cp)
-{
- dbg( "auerswald_delete");
- if (cp == NULL)
- return;
-
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-
- /* Cleaning up */
- auerswald_int_release (cp);
- auerchain_free (&cp->controlchain);
- auerbuf_free_buffers (&cp->bufctl);
-
- /* release the memory */
- kfree( cp);
-}
-
-
-/* Delete an auerswald character context */
-static void auerchar_delete( pauerchar_t ccp)
-{
- dbg ("auerchar_delete");
- if (ccp == NULL)
- return;
-
- /* wake up pending synchronous reads */
- ccp->removed = 1;
- wake_up (&ccp->readwait);
-
- /* remove the read buffer */
- if (ccp->readbuf) {
- auerbuf_releasebuf (ccp->readbuf);
- ccp->readbuf = NULL;
- }
-
- /* remove the character buffers */
- auerbuf_free_buffers (&ccp->bufctl);
-
- /* release the memory */
- kfree( ccp);
-}
-
-
-/* add a new service to the device
- scp->id must be set!
- return: 0 if OK, else error code
-*/
-static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
-{
- int ret;
-
- /* is the device available? */
- if (!cp->usbdev) {
- dbg ("usbdev == NULL");
- return -EIO; /*no: can not add a service, sorry*/
- }
-
- /* is the service available? */
- if (cp->services[scp->id]) {
- dbg ("service is busy");
- return -EBUSY;
- }
-
- /* device is available, service is free */
- cp->services[scp->id] = scp;
-
- /* register service in device */
- ret = auerchain_control_msg(
- &cp->controlchain, /* pointer to control chain */
- cp->usbdev, /* pointer to device */
- usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
- AUV_CHANNELCTL, /* USB message request value */
- AUT_WREQ, /* USB message request type value */
- 0x01, /* open USB message value */
- scp->id, /* USB message index value */
- NULL, /* pointer to the data to send */
- 0, /* length in bytes of the data to send */
- HZ * 2); /* time to wait for the message to complete before timing out */
- if (ret < 0) {
- dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret);
- /* undo above actions */
- cp->services[scp->id] = NULL;
- return ret;
- }
-
- dbg ("auerswald_addservice: channel open OK");
- return 0;
-}
-
-
-/* remove a service from the device
- scp->id must be set! */
-static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
-{
- dbg ("auerswald_removeservice called");
-
- /* check if we have a service allocated */
- if (scp->id == AUH_UNASSIGNED)
- return;
-
- /* If there is a device: close the channel */
- if (cp->usbdev) {
- /* Close the service channel inside the device */
- int ret = auerchain_control_msg(
- &cp->controlchain, /* pointer to control chain */
- cp->usbdev, /* pointer to device */
- usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
- AUV_CHANNELCTL, /* USB message request value */
- AUT_WREQ, /* USB message request type value */
- 0x00, /* close: USB message value */
- scp->id, /* USB message index value */
- NULL, /* pointer to the data to send */
- 0, /* length in bytes of the data to send */
- HZ * 2); /* time to wait for the message to complete before timing out */
- if (ret < 0) {
- dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret);
- }
- else {
- dbg ("auerswald_removeservice: channel close OK");
- }
- }
-
- /* remove the service from the device */
- cp->services[scp->id] = NULL;
- scp->id = AUH_UNASSIGNED;
-}
-
-
-/* --------------------------------------------------------------------- */
-/* Char device functions */
-
-/* Open a new character device */
-static int auerchar_open (struct inode *inode, struct file *file)
-{
- int dtindex = iminor(inode);
- pauerswald_t cp = NULL;
- pauerchar_t ccp = NULL;
- struct usb_interface *intf;
- int ret;
-
- /* minor number in range? */
- if (dtindex < 0) {
- return -ENODEV;
- }
- intf = usb_find_interface(&auerswald_driver, dtindex);
- if (!intf) {
- return -ENODEV;
- }
-
- /* usb device available? */
- cp = usb_get_intfdata (intf);
- if (cp == NULL) {
- return -ENODEV;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- return -ERESTARTSYS;
- }
-
- /* we have access to the device. Now let's allocate memory */
- ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL);
- if (ccp == NULL) {
- err ("out of memory");
- ret = -ENOMEM;
- goto ofail;
- }
-
- /* Initialize device descriptor */
- mutex_init(&ccp->mutex);
- mutex_init(&ccp->readmutex);
- auerbuf_init (&ccp->bufctl);
- ccp->scontext.id = AUH_UNASSIGNED;
- ccp->scontext.dispatch = auerchar_ctrlread_dispatch;
- ccp->scontext.disconnect = auerchar_disconnect;
- init_waitqueue_head (&ccp->readwait);
-
- ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE);
- if (ret) {
- goto ofail;
- }
-
- cp->open_count++;
- ccp->auerdev = cp;
- dbg("open %s as /dev/%s", cp->dev_desc, cp->name);
- mutex_unlock(&cp->mutex);
-
- /* file IO stuff */
- file->f_pos = 0;
- file->private_data = ccp;
- return nonseekable_open(inode, file);
-
- /* Error exit */
-ofail: mutex_unlock(&cp->mutex);
- auerchar_delete (ccp);
- return ret;
-}
-
-
-/* IOCTL functions */
-static long auerchar_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- int ret = 0;
- audevinfo_t devinfo;
- pauerswald_t cp = NULL;
- unsigned int u;
- unsigned int __user *user_arg = (unsigned int __user *)arg;
-
- dbg ("ioctl");
-
- /* get the mutexes */
- if (mutex_lock_interruptible(&ccp->mutex)) {
- return -ERESTARTSYS;
- }
- cp = ccp->auerdev;
- if (!cp) {
- mutex_unlock(&ccp->mutex);
- return -ENODEV;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
-
- /* Check for removal */
- if (!cp->usbdev) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -ENODEV;
- }
- lock_kernel();
- switch (cmd) {
-
- /* return != 0 if Transmit channel is ready to send */
- case IOCTL_AU_TXREADY:
- dbg ("IOCTL_AU_TXREADY");
- u = ccp->auerdev
- && (ccp->scontext.id != AUH_UNASSIGNED)
- && !list_empty (&cp->bufctl.free_buff_list);
- ret = put_user (u, user_arg);
- break;
-
- /* return != 0 if connected to a service channel */
- case IOCTL_AU_CONNECT:
- dbg ("IOCTL_AU_CONNECT");
- u = (ccp->scontext.id != AUH_UNASSIGNED);
- ret = put_user (u, user_arg);
- break;
-
- /* return != 0 if Receive Data available */
- case IOCTL_AU_RXAVAIL:
- dbg ("IOCTL_AU_RXAVAIL");
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- ret = -EIO;
- break;
- }
- u = 0; /* no data */
- if (ccp->readbuf) {
- int restlen = ccp->readbuf->len - ccp->readoffset;
- if (restlen > 0)
- u = 1;
- }
- if (!u) {
- if (!list_empty (&ccp->bufctl.rec_buff_list)) {
- u = 1;
- }
- }
- ret = put_user (u, user_arg);
- break;
-
- /* return the max. buffer length for the device */
- case IOCTL_AU_BUFLEN:
- dbg ("IOCTL_AU_BUFLEN");
- u = cp->maxControlLength;
- ret = put_user (u, user_arg);
- break;
-
- /* requesting a service channel */
- case IOCTL_AU_SERVREQ:
- dbg ("IOCTL_AU_SERVREQ");
- /* requesting a service means: release the previous one first */
- auerswald_removeservice (cp, &ccp->scontext);
- /* get the channel number */
- ret = get_user (u, user_arg);
- if (ret) {
- break;
- }
- if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) {
- ret = -EIO;
- break;
- }
- dbg ("auerchar service request parameters are ok");
- ccp->scontext.id = u;
-
- /* request the service now */
- ret = auerswald_addservice (cp, &ccp->scontext);
- if (ret) {
- /* no: revert service entry */
- ccp->scontext.id = AUH_UNASSIGNED;
- }
- break;
-
- /* get a string descriptor for the device */
- case IOCTL_AU_DEVINFO:
- dbg ("IOCTL_AU_DEVINFO");
- if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) {
- ret = -EFAULT;
- break;
- }
- u = strlen(cp->dev_desc)+1;
- if (u > devinfo.bsize) {
- u = devinfo.bsize;
- }
- ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0;
- break;
-
- /* get the max. string descriptor length */
- case IOCTL_AU_SLEN:
- dbg ("IOCTL_AU_SLEN");
- u = AUSI_DLEN;
- ret = put_user (u, user_arg);
- break;
-
- default:
- dbg ("IOCTL_AU_UNKNOWN");
- ret = -ENOTTY;
- break;
- }
- unlock_kernel();
- /* release the mutexes */
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return ret;
-}
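
For reference, a hypothetical userspace sketch of driving one of these requests. The /dev/auer0 node name and the visibility of the IOCTL_AU_* request codes to userspace are assumptions for illustration, not something guaranteed by this driver:

	/* Userspace sketch only: query the maximum control transfer size. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned int buflen = 0;
		int fd = open("/dev/auer0", O_RDWR);	/* device node name is an assumption */

		if (fd < 0)
			return 1;
		if (ioctl(fd, IOCTL_AU_BUFLEN, &buflen) == 0)	/* mirrors the handler above */
			printf("max control transfer: %u bytes\n", buflen);
		close(fd);
		return 0;
	}
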
-
-/* Read data from the device */
-static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos)
-{
- unsigned long flags;
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerbuf_t bp = NULL;
- wait_queue_t wait;
-
- dbg ("auerchar_read");
-
- /* Error checking */
- if (!ccp)
- return -EIO;
- if (*ppos)
- return -ESPIPE;
- if (count == 0)
- return 0;
-
- /* get the mutex */
- if (mutex_lock_interruptible(&ccp->mutex))
- return -ERESTARTSYS;
-
- /* Can we expect to read something? */
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
-
- /* only one reader per device allowed */
- if (mutex_lock_interruptible(&ccp->readmutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
-
- /* read data from readbuf, if available */
-doreadbuf:
- bp = ccp->readbuf;
- if (bp) {
- /* read the maximum bytes */
- int restlen = bp->len - ccp->readoffset;
- if (restlen < 0)
- restlen = 0;
- if (count > restlen)
- count = restlen;
- if (count) {
- if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) {
- dbg ("auerswald_read: copy_to_user failed");
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return -EFAULT;
- }
- }
- /* advance the read offset */
- ccp->readoffset += count;
- restlen -= count;
- /* reuse the read buffer */
- if (restlen <= 0) {
- auerbuf_releasebuf (bp);
- ccp->readbuf = NULL;
- }
- /* return with number of bytes read */
- if (count) {
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return count;
- }
- }
-
- /* a read buffer is not available. Try to get the next data block. */
-doreadlist:
- /* Preparing for sleep */
- init_waitqueue_entry (&wait, current);
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&ccp->readwait, &wait);
-
- bp = NULL;
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- if (!list_empty (&ccp->bufctl.rec_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = ccp->bufctl.rec_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-
- /* have we got data? */
- if (bp) {
- ccp->readbuf = bp;
- ccp->readoffset = AUH_SIZE; /* for headerbyte */
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ccp->readwait, &wait);
- goto doreadbuf; /* now we can read! */
- }
-
- /* no data available. Should we wait? */
- if (file->f_flags & O_NONBLOCK) {
- dbg ("No read buffer available, returning -EAGAIN");
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ccp->readwait, &wait);
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return -EAGAIN; /* nonblocking, no data available */
- }
-
- /* yes, we should wait! */
- mutex_unlock(&ccp->mutex); /* allow other operations while we wait */
- schedule();
- remove_wait_queue (&ccp->readwait, &wait);
- if (signal_pending (current)) {
- /* woken up by a signal */
- mutex_unlock(&ccp->readmutex);
- return -ERESTARTSYS;
- }
-
- /* Anything left to read? */
- if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) {
- mutex_unlock(&ccp->readmutex);
- return -EIO;
- }
-
- if (mutex_lock_interruptible(&ccp->mutex)) {
- mutex_unlock(&ccp->readmutex);
- return -ERESTARTSYS;
- }
-
- /* try to read the incoming data again */
- goto doreadlist;
-}
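
The read path above open-codes the prepare-to-wait/schedule() sequence. As a rough sketch only, the central blocking step could also be expressed with wait_event_interruptible(); this assumes the same wakeup sites shown above and deliberately ignores the mutex juggling that the real function has to do:

	/* Sketch only: sleep until data is queued or the device disappears. */
	mutex_unlock(&ccp->mutex);		/* drop the device mutex while sleeping, as above */
	if (wait_event_interruptible(ccp->readwait,
				     ccp->removed ||
				     !list_empty(&ccp->bufctl.rec_buff_list))) {
		mutex_unlock(&ccp->readmutex);
		return -ERESTARTSYS;		/* interrupted by a signal */
	}
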
-
-
-/* Write a data block into the right service channel of the device */
-static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerswald_t cp = NULL;
- pauerbuf_t bp;
- unsigned long flags;
- int ret;
- wait_queue_t wait;
-
- dbg ("auerchar_write %zd bytes", len);
-
- /* Error checking */
- if (!ccp)
- return -EIO;
- if (*ppos)
- return -ESPIPE;
- if (len == 0)
- return 0;
-
-write_again:
- /* get the mutex */
- if (mutex_lock_interruptible(&ccp->mutex))
- return -ERESTARTSYS;
-
- /* Can we expect to write something? */
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
-
- cp = ccp->auerdev;
- if (!cp) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
- if (!cp->usbdev) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
- /* Prepare for sleep */
- init_waitqueue_entry (&wait, current);
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&cp->bufferwait, &wait);
-
- /* Try to get a buffer from the device pool.
- We can't use a buffer from ccp->bufctl because the write
- command will last beyond a release() */
- bp = NULL;
- spin_lock_irqsave (&cp->bufctl.lock, flags);
- if (!list_empty (&cp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = cp->bufctl.free_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&cp->bufctl.lock, flags);
-
- /* are there any buffers left? */
- if (!bp) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
-
- /* NONBLOCK: don't wait */
- if (file->f_flags & O_NONBLOCK) {
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&cp->bufferwait, &wait);
- return -EAGAIN;
- }
-
- /* BLOCKING: wait */
- schedule();
- remove_wait_queue (&cp->bufferwait, &wait);
- if (signal_pending (current)) {
- /* woken up by a signal */
- return -ERESTARTSYS;
- }
- goto write_again;
- } else {
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&cp->bufferwait, &wait);
- }
-
- /* protect against too big write requests */
- if (len > cp->maxControlLength)
- len = cp->maxControlLength;
-
- /* Fill the buffer */
- if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) {
- dbg ("copy_from_user failed");
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -EFAULT;
- }
-
- /* set the header byte */
- *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT;
-
- /* Set the transfer Parameters */
- bp->len = len+AUH_SIZE;
- bp->dr->bRequestType = AUT_WREQ;
- bp->dr->bRequest = AUV_WBLOCK;
- bp->dr->wValue = cpu_to_le16 (0);
- bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
- bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE);
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
- auerchar_ctrlwrite_complete, bp);
- /* up we go */
- ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
- mutex_unlock(&cp->mutex);
- if (ret) {
- dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
- else {
- dbg ("auerchar_write: Write OK");
- mutex_unlock(&ccp->mutex);
- return len;
- }
-}
-
-
-/* Close a character device */
-static int auerchar_release (struct inode *inode, struct file *file)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerswald_t cp;
- dbg("release");
-
- mutex_lock(&ccp->mutex);
- cp = ccp->auerdev;
- if (cp) {
- mutex_lock(&cp->mutex);
- /* remove an open service */
- auerswald_removeservice (cp, &ccp->scontext);
- /* detach from device */
- if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) {
- /* usb device waits for removal */
- mutex_unlock(&cp->mutex);
- auerswald_delete (cp);
- } else {
- mutex_unlock(&cp->mutex);
- }
- cp = NULL;
- ccp->auerdev = NULL;
- }
- mutex_unlock(&ccp->mutex);
- auerchar_delete (ccp);
-
- return 0;
-}
-
-
-/*----------------------------------------------------------------------*/
-/* File operation structure */
-static const struct file_operations auerswald_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = auerchar_read,
- .write = auerchar_write,
- .unlocked_ioctl = auerchar_ioctl,
- .open = auerchar_open,
- .release = auerchar_release,
-};
-
-static struct usb_class_driver auerswald_class = {
- .name = "auer%d",
- .fops = &auerswald_fops,
- .minor_base = AUER_MINOR_BASE,
-};
-
-
-/* --------------------------------------------------------------------- */
-/* Special USB driver functions */
-
-/* Probe if this driver wants to serve a USB device
-
- This entry point is called whenever a new device is attached to the bus.
- Then the device driver has to create a new instance of its internal data
- structures for the new device.
-
- The dev argument specifies the device context, which contains pointers
- to all USB descriptors. The interface argument specifies the interface
- number. If a USB driver wants to bind itself to a particular device and
- interface, it has to return a pointer. This pointer normally references
- the device driver's context structure.
-
- Probing is normally done by checking the vendor and product identifications
- or the class and subclass definitions. If they match, the interface number
- is compared with the ones supported by the driver. When probing is done
- class-based, it might be necessary to parse additional USB descriptors
- because the device properties can vary widely.
-*/
-static int auerswald_probe (struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *usbdev = interface_to_usbdev(intf);
- pauerswald_t cp = NULL;
- unsigned int u = 0;
- __le16 *pbuf;
- int ret;
-
- dbg ("probe: vendor id 0x%x, device id 0x%x",
- le16_to_cpu(usbdev->descriptor.idVendor),
- le16_to_cpu(usbdev->descriptor.idProduct));
-
- /* we use only the first -and only- interface */
- if (intf->altsetting->desc.bInterfaceNumber != 0)
- return -ENODEV;
-
- /* allocate memory for our device and initialize it */
- cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL);
- if (cp == NULL) {
- err ("out of memory");
- goto pfail;
- }
-
- /* Initialize device descriptor */
- mutex_init(&cp->mutex);
- cp->usbdev = usbdev;
- auerchain_init (&cp->controlchain);
- auerbuf_init (&cp->bufctl);
- init_waitqueue_head (&cp->bufferwait);
-
- ret = usb_register_dev(intf, &auerswald_class);
- if (ret) {
- err ("Not able to get a minor for this device.");
- goto pfail;
- }
-
- /* Give the device a name */
- sprintf (cp->name, "usb/auer%d", intf->minor);
-
- /* Store the index */
- cp->dtindex = intf->minor;
-
- /* Get the usb version of the device */
- cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice);
- dbg ("Version is %X", cp->version);
-
- /* allow the device some time to settle */
- msleep(334);
-
- /* Try to get a suitable textual description of the device */
- /* Device name:*/
- ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1);
- if (ret >= 0) {
- u += ret;
- /* Append Serial Number */
- memcpy(&cp->dev_desc[u], ",Ser# ", 6);
- u += 6;
- ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1);
- if (ret >= 0) {
- u += ret;
- /* Append subscriber number */
- memcpy(&cp->dev_desc[u], ", ", 2);
- u += 2;
- ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1);
- if (ret >= 0) {
- u += ret;
- }
- }
- }
- cp->dev_desc[u] = '\0';
- info("device is a %s", cp->dev_desc);
-
- /* get the maximum allowed control transfer length */
- pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */
- if (!pbuf) {
- err( "out of memory");
- goto pfail;
- }
- ret = usb_control_msg(cp->usbdev, /* pointer to device */
- usb_rcvctrlpipe( cp->usbdev, 0 ), /* pipe to control endpoint */
- AUV_GETINFO, /* USB message request value */
- AUT_RREQ, /* USB message request type value */
- 0, /* USB message value */
- AUDI_MBCTRANS, /* USB message index value */
- pbuf, /* pointer to the receive buffer */
- 2, /* length of the buffer */
- 2000); /* time to wait for the message to complete before timing out */
- if (ret == 2) {
- cp->maxControlLength = le16_to_cpup(pbuf);
- kfree(pbuf);
- dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength);
- } else {
- kfree(pbuf);
- err("setup: getting max. allowed control transfer length failed with error %d", ret);
- goto pfail;
- }
-
- /* allocate a chain for the control messages */
- if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) {
- err ("out of memory");
- goto pfail;
- }
-
- /* allocate buffers for control messages */
- if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) {
- err ("out of memory");
- goto pfail;
- }
-
- /* start the interrupt endpoint */
- if (auerswald_int_open (cp)) {
- err ("int endpoint failed");
- goto pfail;
- }
-
- /* all OK */
- usb_set_intfdata (intf, cp);
- return 0;
-
- /* Error exit: clean up the memory */
-pfail: auerswald_delete (cp);
- return -EIO;
-}
-
-
-/* Disconnect driver from a served device
-
- This function is called whenever a device which was served by this driver
- is disconnected.
-
- The intf argument specifies the interface context; its driver data is
- the pointer previously registered as driver context by the probe
- function. After returning from the disconnect function the USB
- framework completely deallocates all data structures associated with
- this device. In particular, the usb_device structure must not be used
- any longer by the USB driver.
-*/
-static void auerswald_disconnect (struct usb_interface *intf)
-{
- pauerswald_t cp = usb_get_intfdata (intf);
- unsigned int u;
-
- usb_set_intfdata (intf, NULL);
- if (!cp)
- return;
-
- /* give back our USB minor number */
- usb_deregister_dev(intf, &auerswald_class);
-
- mutex_lock(&cp->mutex);
- info ("device /dev/%s now disconnecting", cp->name);
-
- /* Stop the interrupt endpoint */
- auerswald_int_release (cp);
-
- /* remove the control chain allocated in auerswald_probe
- This has the benefit that
- a) all pending (a)synchronous urbs are unlinked
- b) all buffers dealing with urbs are reclaimed
- */
- auerchain_free (&cp->controlchain);
-
- if (cp->open_count == 0) {
- /* nobody is using this device. So we can clean up now */
- mutex_unlock(&cp->mutex);
- /* mutex_unlock() is possible here because no other task
- can open the device (see above). I don't want
- to kfree() a locked mutex. */
-
- auerswald_delete (cp);
- } else {
- /* device is used. Remove the pointer to the
- usb device (it's not valid any more). The last
- release() will do the clean up */
- cp->usbdev = NULL;
- mutex_unlock(&cp->mutex);
- /* Terminate waiting writers */
- wake_up (&cp->bufferwait);
- /* Inform all waiting readers */
- for ( u = 0; u < AUH_TYPESIZE; u++) {
- pauerscon_t scp = cp->services[u];
- if (scp)
- scp->disconnect( scp);
- }
- }
-}
-
-/* Descriptor for the devices which are served by this driver.
- NOTE: this struct is parsed by the usbmanager install scripts.
- Don't change without caution!
-*/
-static struct usb_device_id auerswald_ids [] = {
- { USB_DEVICE (ID_AUERSWALD, 0x00C0) }, /* COMpact 2104 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00DB) }, /* COMpact 4410/2206 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00DC) }, /* COMpact 4406 DSL */
- { USB_DEVICE (ID_AUERSWALD, 0x00DD) }, /* COMpact 2204 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00F1) }, /* Comfort 2000 System Telephone */
- { USB_DEVICE (ID_AUERSWALD, 0x00F2) }, /* Comfort 1200 System Telephone */
- { } /* Terminating entry */
-};
-
-/* Standard module device table */
-MODULE_DEVICE_TABLE (usb, auerswald_ids);
-
-/* Standard usb driver struct */
-static struct usb_driver auerswald_driver = {
- .name = "auerswald",
- .probe = auerswald_probe,
- .disconnect = auerswald_disconnect,
- .id_table = auerswald_ids,
-};
-
-
-/* --------------------------------------------------------------------- */
-/* Module loading/unloading */
-
-/* Driver initialisation. Called after module loading.
- NOTE: there is no concurrency at _init
-*/
-static int __init auerswald_init (void)
-{
- int result;
- dbg ("init");
-
- /* register driver at the USB subsystem */
- result = usb_register (&auerswald_driver);
- if (result < 0) {
- err ("driver could not be registered");
- return -1;
- }
- return 0;
-}
-
-/* Driver deinit. Called before module removal.
- NOTE: there is no concurrency at _cleanup
-*/
-static void __exit auerswald_cleanup (void)
-{
- dbg ("cleanup");
- usb_deregister (&auerswald_driver);
-}
-
-/* --------------------------------------------------------------------- */
-/* Linux device driver module description */
-
-MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_DESCRIPTION (DRIVER_DESC);
-MODULE_LICENSE ("GPL");
-
-module_init (auerswald_init);
-module_exit (auerswald_cleanup);
-
-/* --------------------------------------------------------------------- */
-
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index d94aa7387608..b897f6554ecd 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -48,7 +48,8 @@ static int isight_firmware_load(struct usb_interface *intf,
if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) {
printk(KERN_ERR "Unable to load isight firmware\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
ptr = firmware->data;
@@ -91,7 +92,6 @@ static int isight_firmware_load(struct usb_interface *intf,
buf, llen, 300) != llen) {
printk(KERN_ERR
"Failed to load isight firmware\n");
- kfree(buf);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
new file mode 100644
index 000000000000..faca4333f27a
--- /dev/null
+++ b/drivers/usb/musb/Kconfig
@@ -0,0 +1,176 @@
+#
+# USB Dual Role (OTG-ready) Controller Drivers
+# for silicon based on Mentor Graphics INVENTRA designs
+#
+
+comment "Enable Host or Gadget support to see Inventra options"
+ depends on !USB && USB_GADGET=n
+
+# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
+config USB_MUSB_HDRC
+ depends on (USB || USB_GADGET) && HAVE_CLK
+ select TWL4030_USB if MACH_OMAP_3430SDP
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
+ help
+ Say Y here if your system has a dual role high speed USB
+ controller based on the Mentor Graphics silicon IP. Then
+ configure options to match your silicon and the board
+ it's being used with, including whether it plays the USB
+ peripheral role, the USB host role, or both.
+
+ Texas Instruments parts using this IP include DaVinci 644x,
+ OMAP 243x, OMAP 343x, and TUSB 6010.
+
+ If you do not know what this is, please say N.
+
+ To compile this driver as a module, choose M here; the
+ module will be called "musb_hdrc".
+
+config USB_MUSB_SOC
+ boolean
+ depends on USB_MUSB_HDRC
+ default y if ARCH_DAVINCI
+ default y if ARCH_OMAP2430
+ default y if ARCH_OMAP34XX
+ help
+ Use a static <asm/arch/hdrc_cnf.h> file to describe how the
+ controller is configured (endpoints, mechanisms, etc) on the
+ current iteration of a given system-on-chip.
+
+comment "DaVinci 644x USB support"
+ depends on USB_MUSB_HDRC && ARCH_DAVINCI
+
+comment "OMAP 243x high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP2430
+
+comment "OMAP 343x high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP34XX
+
+config USB_TUSB6010
+ boolean "TUSB 6010 support"
+ depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+ default y
+ help
+ The TUSB 6010 chip, from Texas Instruments, connects a discrete
+ HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
+ (a high speed serial link). It can use system-specific external
+ DMA controllers.
+
+choice
+ prompt "Driver Mode"
+ depends on USB_MUSB_HDRC
+ help
+ Dual-Role devices can support both host and peripheral roles,
+ as well as the special "OTG Device" role which can switch
+ between both roles as needed.
+
+# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
+# OTG needs both roles, not just USB_MUSB_HOST.
+config USB_MUSB_HOST
+ depends on USB
+ bool "USB Host"
+ help
+ Say Y here if your system supports the USB host role.
+ If it has a USB "A" (rectangular), "Mini-A" (uncommon),
+ or "Mini-AB" connector, it supports the host role.
+ (With a "Mini-AB" connector, you should enable USB OTG.)
+
+# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
+# side support ... OTG needs both roles
+config USB_MUSB_PERIPHERAL
+ depends on USB_GADGET
+ bool "USB Peripheral (gadget stack)"
+ select USB_GADGET_MUSB_HDRC
+ help
+ Say Y here if your system supports the USB peripheral role.
+ If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
+ connector, it supports the peripheral role.
+ (With a "Mini-AB" connector, you should enable USB OTG.)
+
+config USB_MUSB_OTG
+ depends on USB && USB_GADGET && PM && EXPERIMENTAL
+ bool "Both host and peripheral: USB OTG (On The Go) Device"
+ select USB_GADGET_MUSB_HDRC
+ select USB_OTG
+ help
+ The most notable feature of USB OTG is support for a
+ "Dual-Role" device, which can act as either a device
+ or a host. The initial role choice can be changed
+ later, when two dual-role devices talk to each other.
+
+ At this writing, the OTG support in this driver is incomplete,
+ omitting the mandatory HNP or SRP protocols. However, some
+ of the cable-based role switching works. (That is, grounding
+ the ID pin switches the controller to host mode, while leaving
+ it floating leaves it in peripheral mode.)
+
+ Select this if your system has a Mini-AB connector, or
+ to simplify certain kinds of configuration.
+
+ To implement your OTG Targeted Peripherals List (TPL), enable
+ USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
+ to match your requirements.
+
+endchoice
+
+# enable peripheral support (including with OTG)
+config USB_GADGET_MUSB_HDRC
+ bool
+ depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+# default y
+# select USB_GADGET_DUALSPEED
+# select USB_GADGET_SELECTED
+
+# enables host support (including with OTG)
+config USB_MUSB_HDRC_HCD
+ bool
+ depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
+ select USB_OTG if USB_GADGET_MUSB_HDRC
+ default y
+
+
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
+ depends on USB_MUSB_HDRC
+ default y if USB_TUSB6010
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+ Do not select 'y' here unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+config USB_INVENTRA_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ default ARCH_OMAP2430 || ARCH_OMAP34XX
+ help
+ Enable DMA transfers using Mentor's engine.
+
+config USB_TI_CPPI_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ default ARCH_DAVINCI
+ help
+ Enable DMA transfers when TI CPPI DMA is available.
+
+config USB_TUSB_OMAP_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ depends on USB_TUSB6010
+ depends on ARCH_OMAP
+ default y
+ help
+ Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+
+config USB_MUSB_LOGLEVEL
+ depends on USB_MUSB_HDRC
+ int 'Logging Level (0 - none / 3 - annoying / ... )'
+ default 0
+ help
+ Set the logging level. 0 disables the debugging altogether,
+ although when USB_DEBUG is set the value is at least 1.
+ Starting at level 3, per-transfer (urb, usb_request, packet,
+ or dma transfer) tracing may kick in.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
new file mode 100644
index 000000000000..88eb67de08ae
--- /dev/null
+++ b/drivers/usb/musb/Makefile
@@ -0,0 +1,86 @@
+#
+# for USB OTG silicon based on Mentor Graphics INVENTRA designs
+#
+
+musb_hdrc-objs := musb_core.o
+
+obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
+
+ifeq ($(CONFIG_ARCH_DAVINCI),y)
+ musb_hdrc-objs += davinci.o
+endif
+
+ifeq ($(CONFIG_USB_TUSB6010),y)
+ musb_hdrc-objs += tusb6010.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP2430),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP3430),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
+ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
+ musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o
+endif
+
+ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
+ musb_hdrc-objs += musb_virthub.o musb_host.o
+endif
+
+# the kconfig must guarantee that only one of the
+# possible I/O schemes will be enabled at a time ...
+# PIO only, or DMA (several potential schemes).
+# though PIO is always there to back up DMA, and for ep0
+
+ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
+
+ ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
+ musb_hdrc-objs += musbhsdma.o
+
+ else
+ ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
+ musb_hdrc-objs += cppi_dma.o
+
+ else
+ ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
+ musb_hdrc-objs += tusb6010_omap.o
+
+ endif
+ endif
+ endif
+endif
+
+
+################################################################################
+
+# FIXME remove all these extra "-DMUSB_*" things, stick to CONFIG_*
+
+ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
+ EXTRA_CFLAGS += -DMUSB_AHB_ID
+endif
+
+# Debugging
+
+MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL)
+
+ifeq ("$(strip $(MUSB_DEBUG))","")
+ ifdef CONFIG_USB_DEBUG
+ MUSB_DEBUG:=1
+ else
+ MUSB_DEBUG:=0
+ endif
+endif
+
+ifneq ($(MUSB_DEBUG),0)
+ EXTRA_CFLAGS += -DDEBUG
+
+ ifeq ($(CONFIG_PROC_FS),y)
+ musb_hdrc-objs += musb_procfs.o
+ endif
+
+endif
+
+EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG)
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
new file mode 100644
index 000000000000..5ad6d0893cbe
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.c
@@ -0,0 +1,1540 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file implements a DMA interface using TI's CPPI DMA.
+ * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
+ * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
+ */
+
+#include <linux/usb.h>
+
+#include "musb_core.h"
+#include "cppi_dma.h"
+
+
+/* CPPI DMA status 7-mar-2006:
+ *
+ * - See musb_{host,gadget}.c for more info
+ *
+ * - Correct RX DMA generally forces the engine into irq-per-packet mode,
+ * which can easily saturate the CPU under non-mass-storage loads.
+ *
+ * NOTES 24-aug-2006 (2.6.18-rc4):
+ *
+ * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
+ * evidently after the 1 byte packet was received and acked, the queue
+ * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
+ * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
+ * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
+ * of its next (512 byte) packet. IRQ issues?
+ *
+ * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
+ * evidently also directly update the RX and TX CSRs ... so audit all
+ * host and peripheral side DMA code to avoid CSR access after DMA has
+ * been started.
+ */
+
+/* REVISIT now we can avoid preallocating these descriptors; or
+ * more simply, switch to a global freelist not per-channel ones.
+ * Note: at full speed, 64 descriptors == 4K bulk data.
+ */
+#define NUM_TXCHAN_BD 64
+#define NUM_RXCHAN_BD 64
+
+static inline void cpu_drain_writebuffer(void)
+{
+ wmb();
+#ifdef CONFIG_CPU_ARM926T
+ /* REVISIT this "should not be needed",
+ * but lack of it sure seemed to hurt ...
+ */
+ asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
+#endif
+}
+
+static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
+{
+ struct cppi_descriptor *bd = c->freelist;
+
+ if (bd)
+ c->freelist = bd->next;
+ return bd;
+}
+
+static inline void
+cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
+{
+ if (!bd)
+ return;
+ bd->next = c->freelist;
+ c->freelist = bd;
+}
+
+/*
+ * Start DMA controller
+ *
+ * Initialize the DMA controller as necessary.
+ */
+
+/* zero out entire rx state RAM entry for the channel */
+static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
+{
+ musb_writel(&rx->rx_skipbytes, 0, 0);
+ musb_writel(&rx->rx_head, 0, 0);
+ musb_writel(&rx->rx_sop, 0, 0);
+ musb_writel(&rx->rx_current, 0, 0);
+ musb_writel(&rx->rx_buf_current, 0, 0);
+ musb_writel(&rx->rx_len_len, 0, 0);
+ musb_writel(&rx->rx_cnt_cnt, 0, 0);
+}
+
+/* zero out entire tx state RAM entry for the channel */
+static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
+{
+ musb_writel(&tx->tx_head, 0, 0);
+ musb_writel(&tx->tx_buf, 0, 0);
+ musb_writel(&tx->tx_current, 0, 0);
+ musb_writel(&tx->tx_buf_current, 0, 0);
+ musb_writel(&tx->tx_info, 0, 0);
+ musb_writel(&tx->tx_rem_len, 0, 0);
+ /* musb_writel(&tx->tx_dummy, 0, 0); */
+ musb_writel(&tx->tx_complete, 0, ptr);
+}
+
+static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+{
+ int j;
+
+ /* initialize channel fields */
+ c->head = NULL;
+ c->tail = NULL;
+ c->last_processed = NULL;
+ c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+ c->controller = cppi;
+ c->is_rndis = 0;
+ c->freelist = NULL;
+
+ /* build the BD Free list for the channel */
+ for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
+ struct cppi_descriptor *bd;
+ dma_addr_t dma;
+
+ bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
+ bd->dma = dma;
+ cppi_bd_free(c, bd);
+ }
+}
+
+static int cppi_channel_abort(struct dma_channel *);
+
+static void cppi_pool_free(struct cppi_channel *c)
+{
+ struct cppi *cppi = c->controller;
+ struct cppi_descriptor *bd;
+
+ (void) cppi_channel_abort(&c->channel);
+ c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+ c->controller = NULL;
+
+ /* free all its bds */
+ bd = c->last_processed;
+ do {
+ if (bd)
+ dma_pool_free(cppi->pool, bd, bd->dma);
+ bd = cppi_bd_alloc(c);
+ } while (bd);
+ c->last_processed = NULL;
+}
+
+static int __init cppi_controller_start(struct dma_controller *c)
+{
+ struct cppi *controller;
+ void __iomem *tibase;
+ int i;
+
+ controller = container_of(c, struct cppi, controller);
+
+ /* do whatever is necessary to start controller */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ controller->tx[i].transmit = true;
+ controller->tx[i].index = i;
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+ controller->rx[i].transmit = false;
+ controller->rx[i].index = i;
+ }
+
+ /* setup BD list on a per channel basis */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
+ cppi_pool_init(controller, controller->tx + i);
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+ cppi_pool_init(controller, controller->rx + i);
+
+ tibase = controller->tibase;
+ INIT_LIST_HEAD(&controller->tx_complete);
+
+ /* initialise tx/rx channel head pointers to zero */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ struct cppi_channel *tx_ch = controller->tx + i;
+ struct cppi_tx_stateram __iomem *tx;
+
+ INIT_LIST_HEAD(&tx_ch->tx_complete);
+
+ tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
+ tx_ch->state_ram = tx;
+ cppi_reset_tx(tx, 0);
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+ struct cppi_channel *rx_ch = controller->rx + i;
+ struct cppi_rx_stateram __iomem *rx;
+
+ INIT_LIST_HEAD(&rx_ch->tx_complete);
+
+ rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
+ rx_ch->state_ram = rx;
+ cppi_reset_rx(rx);
+ }
+
+ /* enable individual cppi channels */
+ musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ /* enable tx/rx CPPI control */
+ musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+
+ /* disable RNDIS mode, also host rx RNDIS autorequest */
+ musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
+
+ return 0;
+}
+
+/*
+ * Stop DMA controller
+ *
+ * De-Init the DMA controller as necessary.
+ */
+
+static int cppi_controller_stop(struct dma_controller *c)
+{
+ struct cppi *controller;
+ void __iomem *tibase;
+ int i;
+
+ controller = container_of(c, struct cppi, controller);
+
+ tibase = controller->tibase;
+ /* disable individual channel interrupts */
+ musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ DBG(1, "Tearing down RX and TX Channels\n");
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ /* FIXME restructure of txdma to use bds like rxdma */
+ controller->tx[i].last_processed = NULL;
+ cppi_pool_free(controller->tx + i);
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+ cppi_pool_free(controller->rx + i);
+
+ /* In the TX case proper teardown is supported. We resort to disabling
+ * TX/RX CPPI only after cleanup of the TX channels, since TX CPPI
+ * cannot be disabled before TX teardown is complete.
+ */
+ /* disable tx/rx cppi */
+ musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+
+ return 0;
+}
+
+/* While a dma channel is allocated, we only want the core irqs active
+ * for fault reports, otherwise we'd get irqs that we don't care about.
+ * Except for TX irqs, where dma done != fifo empty and reusable ...
+ *
+ * NOTE: docs don't say either way, but irq masking **enables** irqs.
+ *
+ * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
+ */
+static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
+}
+
+static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
+}
+
+
+/*
+ * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
+ * each transfer direction of a non-control endpoint, so allocating
+ * (and deallocating) is mostly a way to notice bad housekeeping on
+ * the software side. We assume the irqs are always active.
+ */
+static struct dma_channel *
+cppi_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *ep, u8 transmit)
+{
+ struct cppi *controller;
+ u8 index;
+ struct cppi_channel *cppi_ch;
+ void __iomem *tibase;
+
+ controller = container_of(c, struct cppi, controller);
+ tibase = controller->tibase;
+
+ /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
+ index = ep->epnum - 1;
+
+ /* return the corresponding CPPI Channel Handle, and
+ * probably disable the non-CPPI irq until we need it.
+ */
+ if (transmit) {
+ if (index >= ARRAY_SIZE(controller->tx)) {
+ DBG(1, "no %cX%d CPPI channel\n", 'T', index);
+ return NULL;
+ }
+ cppi_ch = controller->tx + index;
+ } else {
+ if (index >= ARRAY_SIZE(controller->rx)) {
+ DBG(1, "no %cX%d CPPI channel\n", 'R', index);
+ return NULL;
+ }
+ cppi_ch = controller->rx + index;
+ core_rxirq_disable(tibase, ep->epnum);
+ }
+
+ /* REVISIT make this an error later once the same driver code works
+ * with the other DMA engine too
+ */
+ if (cppi_ch->hw_ep)
+ DBG(1, "re-allocating DMA%d %cX channel %p\n",
+ index, transmit ? 'T' : 'R', cppi_ch);
+ cppi_ch->hw_ep = ep;
+ cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+ return &cppi_ch->channel;
+}
+
+/* Release a CPPI Channel. */
+static void cppi_channel_release(struct dma_channel *channel)
+{
+ struct cppi_channel *c;
+ void __iomem *tibase;
+
+ /* REVISIT: for paranoia, check state and abort if needed... */
+
+ c = container_of(channel, struct cppi_channel, channel);
+ tibase = c->controller->tibase;
+ if (!c->hw_ep)
+ DBG(1, "releasing idle DMA channel %p\n", c);
+ else if (!c->transmit)
+ core_rxirq_enable(tibase, c->index + 1);
+
+ /* for now, leave its cppi IRQ enabled (we won't trigger it) */
+ c->hw_ep = NULL;
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
+{
+ void __iomem *base = c->controller->mregs;
+ struct cppi_rx_stateram __iomem *rx = c->state_ram;
+
+ musb_ep_select(base, c->index + 1);
+
+ DBG(level, "RX DMA%d%s: %d left, csr %04x, "
+ "%08x H%08x S%08x C%08x, "
+ "B%08x L%08x %08x .. %08x"
+ "\n",
+ c->index, tag,
+ musb_readl(c->controller->tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
+ musb_readw(c->hw_ep->regs, MUSB_RXCSR),
+
+ musb_readl(&rx->rx_skipbytes, 0),
+ musb_readl(&rx->rx_head, 0),
+ musb_readl(&rx->rx_sop, 0),
+ musb_readl(&rx->rx_current, 0),
+
+ musb_readl(&rx->rx_buf_current, 0),
+ musb_readl(&rx->rx_len_len, 0),
+ musb_readl(&rx->rx_cnt_cnt, 0),
+ musb_readl(&rx->rx_complete, 0)
+ );
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
+{
+ void __iomem *base = c->controller->mregs;
+ struct cppi_tx_stateram __iomem *tx = c->state_ram;
+
+ musb_ep_select(base, c->index + 1);
+
+ DBG(level, "TX DMA%d%s: csr %04x, "
+ "H%08x S%08x C%08x %08x, "
+ "F%08x L%08x .. %08x"
+ "\n",
+ c->index, tag,
+ musb_readw(c->hw_ep->regs, MUSB_TXCSR),
+
+ musb_readl(&tx->tx_head, 0),
+ musb_readl(&tx->tx_buf, 0),
+ musb_readl(&tx->tx_current, 0),
+ musb_readl(&tx->tx_buf_current, 0),
+
+ musb_readl(&tx->tx_info, 0),
+ musb_readl(&tx->tx_rem_len, 0),
+ /* dummy/unused word 6 */
+ musb_readl(&tx->tx_complete, 0)
+ );
+}
+
+/* Context: controller irqlocked */
+static inline void
+cppi_rndis_update(struct cppi_channel *c, int is_rx,
+ void __iomem *tibase, int is_rndis)
+{
+ /* we may need to change the rndis flag for this cppi channel */
+ if (c->is_rndis != is_rndis) {
+ u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
+ u32 temp = 1 << (c->index);
+
+ if (is_rx)
+ temp <<= 16;
+ if (is_rndis)
+ value |= temp;
+ else
+ value &= ~temp;
+ musb_writel(tibase, DAVINCI_RNDIS_REG, value);
+ c->is_rndis = is_rndis;
+ }
+}
+
+static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
+{
+ pr_debug("RXBD/%s %08x: "
+ "nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
+ tag, bd->dma,
+ bd->hw_next, bd->hw_bufp, bd->hw_off_len,
+ bd->hw_options);
+}
+
+static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
+{
+#if MUSB_DEBUG > 0
+ struct cppi_descriptor *bd;
+
+ if (!_dbg_level(level))
+ return;
+ cppi_dump_rx(level, rx, tag);
+ if (rx->last_processed)
+ cppi_dump_rxbd("last", rx->last_processed);
+ for (bd = rx->head; bd; bd = bd->next)
+ cppi_dump_rxbd("active", bd);
+#endif
+}
+
+
+/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
+ * so we won't ever use it (see "CPPI RX Woes" below).
+ */
+static inline int cppi_autoreq_update(struct cppi_channel *rx,
+ void __iomem *tibase, int onepacket, unsigned n_bds)
+{
+ u32 val;
+
+#ifdef RNDIS_RX_IS_USABLE
+ u32 tmp;
+ /* assert(is_host_active(musb)) */
+
+ /* start from "AutoReq never" */
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ val = tmp & ~((0x3) << (rx->index * 2));
+
+ /* HCD arranged reqpkt for packet #1. we arrange it
+ * for all but the last one, maybe in two segments.
+ */
+ if (!onepacket) {
+#if 0
+ /* use two segments, autoreq "all" then the last "never" */
+ val |= ((0x3) << (rx->index * 2));
+ n_bds--;
+#else
+ /* one segment, autoreq "all-but-last" */
+ val |= ((0x1) << (rx->index * 2));
+#endif
+ }
+
+ if (val != tmp) {
+ int n = 100;
+
+ /* make sure that autoreq is updated before continuing */
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
+ do {
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ if (tmp == val)
+ break;
+ cpu_relax();
+ } while (n-- > 0);
+ }
+#endif
+
+ /* REQPKT is turned off after each segment */
+ if (n_bds && rx->channel.actual_len) {
+ void __iomem *regs = rx->hw_ep->regs;
+
+ val = musb_readw(regs, MUSB_RXCSR);
+ if (!(val & MUSB_RXCSR_H_REQPKT)) {
+ val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR, val);
+			/* flush writebuffer */
+ val = musb_readw(regs, MUSB_RXCSR);
+ }
+ }
+ return n_bds;
+}
+
+
+/* Buffer enqueuing Logic:
+ *
+ * - RX builds new queues each time, to help handle routine "early
+ * termination" cases (faults, including errors and short reads)
+ * more correctly.
+ *
+ * - for now, TX reuses the same queue of BDs every time
+ *
+ * REVISIT long term, we want a normal dynamic model.
+ * ... the goal will be to append to the
+ * existing queue, processing completed "dma buffers" (segments) on the fly.
+ *
+ * Otherwise we force an IRQ latency between requests, which slows us a lot
+ * (especially in "transparent" dma). Unfortunately that seems to be
+ * inherent in the DMA model from the Mentor code, except in the rare case
+ * of transfers big enough (~128+ KB) that we could append "middle" segments
+ * in the TX paths. (RX can't do this, see below.)
+ *
+ * That's true even in the CPPI-friendly iso case, where most urbs have
+ * several small segments provided in a group and where the "packet at a time"
+ * "transparent" DMA model is always correct, even on the RX side.
+ */
+
+/*
+ * CPPI TX:
+ * ========
+ * TX is a lot more reasonable than RX; it doesn't need to run in
+ * irq-per-packet mode very often. RNDIS mode seems to behave well too
+ * (except how it handles the exactly-N-packets case). Building a
+ * txdma queue with multiple requests (urb or usb_request) looks
+ * like it would work ... but fault handling would need much testing.
+ *
+ * The main issue with TX mode RNDIS relates to transfer lengths that
+ * are an exact multiple of the packet length. It appears that there's
+ * a hiccup in that case (maybe the DMA completes before the ZLP gets
+ * written?) boiling down to not being able to rely on CPPI writing any
+ * terminating zero length packet before the next transfer is written.
+ * So that's punted to PIO; better yet, gadget drivers can avoid it.
+ *
+ * Plus, there's allegedly an undocumented constraint that rndis transfer
+ * length be a multiple of 64 bytes ... but the chip doesn't act that
+ * way, and we really don't _want_ that behavior anyway.
+ *
+ * On TX, "transparent" mode works ... although experiments have shown
+ * problems trying to use the SOP/EOP bits in different USB packets.
+ *
+ * REVISIT try to handle terminating zero length packets using CPPI
+ * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
+ * links avoid that issue by forcing them to avoid zlps.)
+ */
+static void
+cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
+{
+ unsigned maxpacket = tx->maxpacket;
+ dma_addr_t addr = tx->buf_dma + tx->offset;
+ size_t length = tx->buf_len - tx->offset;
+ struct cppi_descriptor *bd;
+ unsigned n_bds;
+ unsigned i;
+ struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
+ int rndis;
+
+ /* TX can use the CPPI "rndis" mode, where we can probably fit this
+ * transfer in one BD and one IRQ. The only time we would NOT want
+ * to use it is when hardware constraints prevent it, or if we'd
+ * trigger the "send a ZLP?" confusion.
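+	 *
+	 * Illustrative numbers: maxpacket 512 with a 1200 byte transfer
+	 * qualifies (512 is a multiple of 64, 1200 < 0xffff, and
+	 * 1200 % 512 != 0), so all 1200 bytes go out as a single BD.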
+ */
+ rndis = (maxpacket & 0x3f) == 0
+ && length < 0xffff
+ && (length % maxpacket) != 0;
+
+ if (rndis) {
+ maxpacket = length;
+ n_bds = 1;
+ } else {
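+		/* transparent mode: one BD per packet; e.g. (illustrative)
+		 * 1600 bytes at maxpacket 512 takes four BDs, the last one
+		 * short, assuming NUM_TXCHAN_BD doesn't clamp that first
+		 */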
+ n_bds = length / maxpacket;
+ if (!length || (length % maxpacket))
+ n_bds++;
+ n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
+ length = min(n_bds * maxpacket, length);
+ }
+
+ DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+ tx->index,
+ maxpacket,
+ rndis ? "rndis" : "transparent",
+ n_bds,
+ addr, length);
+
+ cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
+
+ /* assuming here that channel_program is called during
+ * transfer initiation ... current code maintains state
+ * for one outstanding request only (no queues, not even
+ * the implicit ones of an iso urb).
+ */
+
+ bd = tx->freelist;
+ tx->head = bd;
+ tx->last_processed = NULL;
+
+ /* FIXME use BD pool like RX side does, and just queue
+ * the minimum number for this request.
+ */
+
+ /* Prepare queue of BDs first, then hand it to hardware.
+ * All BDs except maybe the last should be of full packet
+ * size; for RNDIS there _is_ only that last packet.
+ */
+ for (i = 0; i < n_bds; ) {
+ if (++i < n_bds && bd->next)
+ bd->hw_next = bd->next->dma;
+ else
+ bd->hw_next = 0;
+
+ bd->hw_bufp = tx->buf_dma + tx->offset;
+
+ /* FIXME set EOP only on the last packet,
+ * SOP only on the first ... avoid IRQs
+ */
+ if ((tx->offset + maxpacket) <= tx->buf_len) {
+ tx->offset += maxpacket;
+ bd->hw_off_len = maxpacket;
+ bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | maxpacket;
+ } else {
+ /* only this one may be a partial USB Packet */
+ u32 partial_len;
+
+ partial_len = tx->buf_len - tx->offset;
+ tx->offset = tx->buf_len;
+ bd->hw_off_len = partial_len;
+
+ bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | partial_len;
+ if (partial_len == 0)
+ bd->hw_options |= CPPI_ZERO_SET;
+ }
+
+ DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+ bd, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options);
+
+ /* update the last BD enqueued to the list */
+ tx->tail = bd;
+ bd = bd->next;
+ }
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* Write to the HeadPtr in state RAM to trigger */
+ musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
+
+ cppi_dump_tx(5, tx, "/S");
+}
+
+/*
+ * CPPI RX Woes:
+ * =============
+ * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
+ * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
+ * (Full speed transfers have similar scenarios.)
+ *
+ * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
+ * and the next packet goes into a buffer that's queued later; while (b) fills
+ * the buffer with 1024 bytes. How to do that with CPPI?
+ *
+ * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
+ * (b) loses **BADLY** because nothing (!) happens when that second packet
+ * fills the buffer, much less when a third one arrives. (Which makes this
+ * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
+ * is optional, and it's fine if peripherals -- not hosts! -- pad messages
+ * out to end-of-buffer. Standard PCI host controller DMA descriptors
+ * implement that mode by default ... which is no accident.)
+ *
+ * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
+ * converse problems: (b) is handled right, but (a) loses badly. CPPI RX
+ * ignores SOP/EOP markings and processes both of those BDs; so both packets
+ * are loaded into the buffer (with a 212 byte gap between them), and the next
+ * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
+ * are intended as outputs for RX queues, not inputs...)
+ *
+ * - A variant of "transparent" mode -- one BD at a time -- is the only way to
+ * reliably make both cases work, with software handling both cases correctly
+ * and at the significant penalty of needing an IRQ per packet. (The lack of
+ * I/O overlap can be slightly ameliorated by enabling double buffering.)
+ *
+ * So how to get rid of IRQ-per-packet? The transparent multi-BD case could
+ * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
+ * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
+ * with guaranteed driver level fault recovery and scrubbing out what's left
+ * of that garbaged datastream.
+ *
+ * But there seems to be no way to identify the cases where CPPI RNDIS mode
+ * is appropriate -- which do NOT include RNDIS host drivers, but do include
+ * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
+ * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
+ * that applies best on the peripheral side (and which could fail rudely).
+ *
+ * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
+ * cases other than mass storage class. Otherwise we're correct but slow,
+ * since CPPI penalizes our need for a "true RNDIS" default mode.
+ */
+
+
+/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
+ *
+ * IFF
+ * (a) peripheral mode ... since rndis peripherals could pad their
+ * writes to hosts, causing i/o failure; or we'd have to cope with
+ * a largely unknowable variety of host side protocol variants
+ * (b) and short reads are NOT errors ... since full reads would
+ * cause those same i/o failures
+ * (c) and read length is
+ * - less than 64KB (max per cppi descriptor)
+ * - not a multiple of 4096 (g_zero default, full reads typical)
+ * - N (>1) packets long, ditto (full reads not EXPECTED)
+ * THEN
+ * try rx rndis mode
+ *
+ * Cost of heuristic failing: RXDMA wedges at the end of transfers that
+ * fill out the whole buffer. Buggy host side usb network drivers could
+ * trigger that, but "in the field" such bugs seem to be all but unknown.
+ *
+ * So this module parameter lets the heuristic be disabled. When using
+ * gadgetfs, the heuristic will probably need to be disabled.
+ */
+static int cppi_rx_rndis = 1;
+
+module_param(cppi_rx_rndis, bool, 0);
+MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
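+
+/* For example (assuming the usual musb_hdrc module build), loading with
+ * "cppi_rx_rndis=0" disables the heuristic and forces "transparent" RX.
+ */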
+
+
+/**
+ * cppi_next_rx_segment - dma read for the next chunk of a buffer
+ * @musb: the controller
+ * @rx: dma channel
+ * @onepacket: true unless caller treats short reads as errors, and
+ * performs fault recovery above usbcore.
+ * Context: controller irqlocked
+ *
+ * See above notes about why we can't use multi-BD RX queues except in
+ * rare cases (mass storage class), and can never use the hardware "rndis"
+ * mode (since it's not a "true" RNDIS mode) with complete safety.
+ *
+ * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
+ * code to recover from corrupted datastreams after each short transfer.
+ */
+static void
+cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
+{
+ unsigned maxpacket = rx->maxpacket;
+ dma_addr_t addr = rx->buf_dma + rx->offset;
+ size_t length = rx->buf_len - rx->offset;
+ struct cppi_descriptor *bd, *tail;
+ unsigned n_bds;
+ unsigned i;
+ void __iomem *tibase = musb->ctrl_base;
+ int is_rndis = 0;
+ struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
+
+ if (onepacket) {
+ /* almost every USB driver, host or peripheral side */
+ n_bds = 1;
+
+ /* maybe apply the heuristic above */
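+		/* i.e. more than one packet, length fits in 16 bits, and is
+		 * a multiple of maxpacket (a power of two) but not of 4096
+		 */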
+ if (cppi_rx_rndis
+ && is_peripheral_active(musb)
+ && length > maxpacket
+ && (length & ~0xffff) == 0
+ && (length & 0x0fff) != 0
+ && (length & (maxpacket - 1)) == 0) {
+ maxpacket = length;
+ is_rndis = 1;
+ }
+ } else {
+ /* virtually nothing except mass storage class */
+ if (length > 0xffff) {
+ n_bds = 0xffff / maxpacket;
+ length = n_bds * maxpacket;
+ } else {
+ n_bds = length / maxpacket;
+ if (length % maxpacket)
+ n_bds++;
+ }
+ if (n_bds == 1)
+ onepacket = 1;
+ else
+ n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
+ }
+
+ /* In host mode, autorequest logic can generate some IN tokens; it's
+ * tricky since we can't leave REQPKT set in RXCSR after the transfer
+ * finishes. So: multipacket transfers involve two or more segments.
+ * And always at least two IRQs ... RNDIS mode is not an option.
+ */
+ if (is_host_active(musb))
+ n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
+
+ cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
+
+ length = min(n_bds * maxpacket, length);
+
+ DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+ "dma 0x%x len %u %u/%u\n",
+ rx->index, maxpacket,
+ onepacket
+ ? (is_rndis ? "rndis" : "onepacket")
+ : "multipacket",
+ n_bds,
+ musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff,
+ addr, length, rx->channel.actual_len, rx->buf_len);
+
+ /* only queue one segment at a time, since the hardware prevents
+ * correct queue shutdown after unexpected short packets
+ */
+ bd = cppi_bd_alloc(rx);
+ rx->head = bd;
+
+ /* Build BDs for all packets in this segment */
+ for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
+ u32 bd_len;
+
+ if (i) {
+ bd = cppi_bd_alloc(rx);
+ if (!bd)
+ break;
+ tail->next = bd;
+ tail->hw_next = bd->dma;
+ }
+ bd->hw_next = 0;
+
+ /* all but the last packet will be maxpacket size */
+ if (maxpacket < length)
+ bd_len = maxpacket;
+ else
+ bd_len = length;
+
+ bd->hw_bufp = addr;
+ addr += bd_len;
+ rx->offset += bd_len;
+
+ bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
+ bd->buflen = bd_len;
+
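+		/* only the first (SOP) descriptor carries the whole
+		 * segment length in its options word
+		 */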
+ bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
+ length -= bd_len;
+ }
+
+ /* we always expect at least one reusable BD! */
+ if (!tail) {
+ WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
+ return;
+ } else if (i < n_bds)
+ WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
+
+ tail->next = NULL;
+ tail->hw_next = 0;
+
+ bd = rx->head;
+ rx->tail = tail;
+
+ /* short reads and other faults should terminate this entire
+ * dma segment. we want one "dma packet" per dma segment, not
+ * one per USB packet, terminating the whole queue at once...
+ * NOTE that current hardware seems to ignore SOP and EOP.
+ */
+ bd->hw_options |= CPPI_SOP_SET;
+ tail->hw_options |= CPPI_EOP_SET;
+
+ if (debug >= 5) {
+ struct cppi_descriptor *d;
+
+ for (d = rx->head; d; d = d->next)
+ cppi_dump_rxbd("S", d);
+ }
+
+ /* in case the preceding transfer left some state... */
+ tail = rx->last_processed;
+ if (tail) {
+ tail->next = bd;
+ tail->hw_next = bd->dma;
+ }
+
+ core_rxirq_enable(tibase, rx->index + 1);
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* REVISIT specs say to write this AFTER the BUFCNT register
+ * below ... but that loses badly.
+ */
+ musb_writel(&rx_ram->rx_head, 0, bd->dma);
+
+ /* bufferCount must be at least 3, and zeroes on completion
+ * unless it underflows below zero, or stops at two, or keeps
+ * growing ... grr.
+ */
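+	/* try to top the count up to this segment plus the two-buffer
+	 * cushion noted above; writes appear to add to the current count
+	 */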
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff;
+
+ if (!i)
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds + 2);
+ else if (n_bds > (i - 3))
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds - (i - 3));
+
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff;
+ if (i < (2 + n_bds)) {
+ DBG(2, "bufcnt%d underrun - %d (for %d)\n",
+ rx->index, i, n_bds);
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds + 2);
+ }
+
+ cppi_dump_rx(4, rx, "/S");
+}
+
+/**
+ * cppi_channel_program - program channel for data transfer
+ * @ch: the channel
+ * @maxpacket: max packet size
+ * @mode: For RX, 1 unless the usb protocol driver promised to treat
+ * all short reads as errors and kick in high level fault recovery.
+ * For TX, ignored because of RNDIS mode races/glitches.
+ * @dma_addr: dma address of buffer
+ * @len: length of buffer
+ * Context: controller irqlocked
+ */
+static int cppi_channel_program(struct dma_channel *ch,
+ u16 maxpacket, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct cppi_channel *cppi_ch;
+ struct cppi *controller;
+ struct musb *musb;
+
+ cppi_ch = container_of(ch, struct cppi_channel, channel);
+ controller = cppi_ch->controller;
+ musb = controller->musb;
+
+ switch (ch->status) {
+ case MUSB_DMA_STATUS_BUS_ABORT:
+ case MUSB_DMA_STATUS_CORE_ABORT:
+ /* fault irq handler should have handled cleanup */
+ WARNING("%cX DMA%d not cleaned up after abort!\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* WARN_ON(1); */
+ break;
+ case MUSB_DMA_STATUS_BUSY:
+ WARNING("program active channel? %cX DMA%d\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* WARN_ON(1); */
+ break;
+ case MUSB_DMA_STATUS_UNKNOWN:
+ DBG(1, "%cX DMA%d not allocated!\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* FALLTHROUGH */
+ case MUSB_DMA_STATUS_FREE:
+ break;
+ }
+
+ ch->status = MUSB_DMA_STATUS_BUSY;
+
+ /* set transfer parameters, then queue up its first segment */
+ cppi_ch->buf_dma = dma_addr;
+ cppi_ch->offset = 0;
+ cppi_ch->maxpacket = maxpacket;
+ cppi_ch->buf_len = len;
+
+ /* TX channel? or RX? */
+ if (cppi_ch->transmit)
+ cppi_next_tx_segment(musb, cppi_ch);
+ else
+ cppi_next_rx_segment(musb, cppi_ch, mode);
+
+ return true;
+}
+
+static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
+{
+ struct cppi_channel *rx = &cppi->rx[ch];
+ struct cppi_rx_stateram __iomem *state = rx->state_ram;
+ struct cppi_descriptor *bd;
+ struct cppi_descriptor *last = rx->last_processed;
+ bool completed = false;
+ bool acked = false;
+ int i;
+ dma_addr_t safe2ack;
+ void __iomem *regs = rx->hw_ep->regs;
+
+ cppi_dump_rx(6, rx, "/K");
+
+ bd = last ? last->next : rx->head;
+ if (!bd)
+ return false;
+
+ /* run through all completed BDs */
+ for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
+ (safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ /* catch latest BD writes from CPPI */
+ rmb();
+ if (!completed && (bd->hw_options & CPPI_OWN_SET))
+ break;
+
+ DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+ "off.len %08x opt.len %08x (%d)\n",
+ bd->dma, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options,
+ rx->channel.actual_len);
+
+ /* actual packet received length */
+ if ((bd->hw_options & CPPI_SOP_SET) && !completed)
+ len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
+ else
+ len = 0;
+
+ if (bd->hw_options & CPPI_EOQ_MASK)
+ completed = true;
+
+ if (!completed && len < bd->buflen) {
+ /* NOTE: when we get a short packet, RXCSR_H_REQPKT
+ * must have been cleared, and no more DMA packets may
+			 * be active in the queue... TI docs didn't say, but
+ * CPPI ignores those BDs even though OWN is still set.
+ */
+ completed = true;
+ DBG(3, "rx short %d/%d (%d)\n",
+ len, bd->buflen,
+ rx->channel.actual_len);
+ }
+
+ /* If we got here, we expect to ack at least one BD; meanwhile
+		 * CPPI may be completing other BDs while we scan this list...
+ *
+ * RACE: we can notice OWN cleared before CPPI raises the
+ * matching irq by writing that BD as the completion pointer.
+ * In such cases, stop scanning and wait for the irq, avoiding
+ * lost acks and states where BD ownership is unclear.
+ */
+ if (bd->dma == safe2ack) {
+ musb_writel(&state->rx_complete, 0, safe2ack);
+ safe2ack = musb_readl(&state->rx_complete, 0);
+ acked = true;
+ if (bd->dma == safe2ack)
+ safe2ack = 0;
+ }
+
+ rx->channel.actual_len += len;
+
+ cppi_bd_free(rx, last);
+ last = bd;
+
+ /* stop scanning on end-of-segment */
+ if (bd->hw_next == 0)
+ completed = true;
+ }
+ rx->last_processed = last;
+
+ /* dma abort, lost ack, or ... */
+ if (!acked && last) {
+ int csr;
+
+ if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
+ musb_writel(&state->rx_complete, 0, safe2ack);
+ if (safe2ack == 0) {
+ cppi_bd_free(rx, last);
+ rx->last_processed = NULL;
+
+ /* if we land here on the host side, H_REQPKT will
+ * be clear and we need to restart the queue...
+ */
+ WARN_ON(rx->head);
+ }
+ musb_ep_select(cppi->mregs, rx->index + 1);
+ csr = musb_readw(regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_DMAENAB) {
+ DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+ rx->index,
+ rx->head, rx->tail,
+ rx->last_processed
+ ? rx->last_processed->dma
+ : 0,
+ completed ? ", completed" : "",
+ csr);
+ cppi_dump_rxq(4, "/what?", rx);
+ }
+ }
+ if (!completed) {
+ int csr;
+
+ rx->head = bd;
+
+ /* REVISIT seems like "autoreq all but EOP" doesn't...
+		 * setting it here "should" be racy, but seems to work
+ */
+ csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+ if (is_host_active(cppi->musb)
+ && bd
+ && !(csr & MUSB_RXCSR_H_REQPKT)) {
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(regs, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | csr);
+ csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+ }
+ } else {
+ rx->head = NULL;
+ rx->tail = NULL;
+ }
+
+ cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
+ return completed;
+}
+
+void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+{
+ void __iomem *tibase;
+ int i, index;
+ struct cppi *cppi;
+ struct musb_hw_ep *hw_ep = NULL;
+
+ cppi = container_of(musb->dma_controller, struct cppi, controller);
+
+ tibase = musb->ctrl_base;
+
+ /* process TX channels */
+ for (index = 0; tx; tx = tx >> 1, index++) {
+ struct cppi_channel *tx_ch;
+ struct cppi_tx_stateram __iomem *tx_ram;
+ bool completed = false;
+ struct cppi_descriptor *bd;
+
+ if (!(tx & 1))
+ continue;
+
+ tx_ch = cppi->tx + index;
+ tx_ram = tx_ch->state_ram;
+
+ /* FIXME need a cppi_tx_scan() routine, which
+ * can also be called from abort code
+ */
+
+ cppi_dump_tx(5, tx_ch, "/E");
+
+ bd = tx_ch->head;
+
+ if (NULL == bd) {
+ DBG(1, "null BD\n");
+ continue;
+ }
+
+ /* run through all completed BDs */
+ for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ /* catch latest BD writes from CPPI */
+ rmb();
+ if (bd->hw_options & CPPI_OWN_SET)
+ break;
+
+ DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
+ bd, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options);
+
+ len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
+ tx_ch->channel.actual_len += len;
+
+ tx_ch->last_processed = bd;
+
+ /* write completion register to acknowledge
+ * processing of completed BDs, and possibly
+ * release the IRQ; EOQ might not be set ...
+ *
+ * REVISIT use the same ack strategy as rx
+ *
+ * REVISIT have observed bit 18 set; huh??
+ */
+ /* if ((bd->hw_options & CPPI_EOQ_MASK)) */
+ musb_writel(&tx_ram->tx_complete, 0, bd->dma);
+
+ /* stop scanning on end-of-segment */
+ if (bd->hw_next == 0)
+ completed = true;
+ }
+
+ /* on end of segment, maybe go to next one */
+ if (completed) {
+ /* cppi_dump_tx(4, tx_ch, "/complete"); */
+
+ /* transfer more, or report completion */
+ if (tx_ch->offset >= tx_ch->buf_len) {
+ tx_ch->head = NULL;
+ tx_ch->tail = NULL;
+ tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ hw_ep = tx_ch->hw_ep;
+
+ /* Peripheral role never repurposes the
+ * endpoint, so immediate completion is
+ * safe. Host role waits for the fifo
+ * to empty (TXPKTRDY irq) before going
+ * to the next queued bulk transfer.
+ */
+ if (is_host_active(cppi->musb)) {
+#if 0
+ /* WORKAROUND because we may
+					 * not always get TXPKTRDY ...
+ */
+ int csr;
+
+ csr = musb_readw(hw_ep->regs,
+ MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+#endif
+ completed = false;
+ }
+ if (completed)
+ musb_dma_completion(musb, index + 1, 1);
+
+ } else {
+ /* Bigger transfer than we could fit in
+ * that first batch of descriptors...
+ */
+ cppi_next_tx_segment(musb, tx_ch);
+ }
+ } else
+ tx_ch->head = bd;
+ }
+
+ /* Start processing the RX block */
+ for (index = 0; rx; rx = rx >> 1, index++) {
+
+ if (rx & 1) {
+ struct cppi_channel *rx_ch;
+
+ rx_ch = cppi->rx + index;
+
+ /* let incomplete dma segments finish */
+ if (!cppi_rx_scan(cppi, index))
+ continue;
+
+ /* start another dma segment if needed */
+ if (rx_ch->channel.actual_len != rx_ch->buf_len
+ && rx_ch->channel.actual_len
+ == rx_ch->offset) {
+ cppi_next_rx_segment(musb, rx_ch, 1);
+ continue;
+ }
+
+ /* all segments completed! */
+ rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ hw_ep = rx_ch->hw_ep;
+
+ core_rxirq_disable(tibase, index + 1);
+ musb_dma_completion(musb, index + 1, 0);
+ }
+ }
+
+ /* write to CPPI EOI register to re-enable interrupts */
+ musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+}
+
+/* Instantiate a software object representing a DMA controller. */
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *mregs)
+{
+ struct cppi *controller;
+
+ controller = kzalloc(sizeof *controller, GFP_KERNEL);
+ if (!controller)
+ return NULL;
+
+ controller->mregs = mregs;
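+	/* TI/CPPI control registers sit DAVINCI_BASE_OFFSET below the
+	 * Mentor core registers
+	 */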
+ controller->tibase = mregs - DAVINCI_BASE_OFFSET;
+
+ controller->musb = musb;
+ controller->controller.start = cppi_controller_start;
+ controller->controller.stop = cppi_controller_stop;
+ controller->controller.channel_alloc = cppi_channel_allocate;
+ controller->controller.channel_release = cppi_channel_release;
+ controller->controller.channel_program = cppi_channel_program;
+ controller->controller.channel_abort = cppi_channel_abort;
+
+ /* NOTE: allocating from on-chip SRAM would give the least
+ * contention for memory access, if that ever matters here.
+ */
+
+ /* setup BufferPool */
+ controller->pool = dma_pool_create("cppi",
+ controller->musb->controller,
+ sizeof(struct cppi_descriptor),
+ CPPI_DESCRIPTOR_ALIGN, 0);
+ if (!controller->pool) {
+ kfree(controller);
+ return NULL;
+ }
+
+ return &controller->controller;
+}
+
+/*
+ * Destroy a previously-instantiated DMA controller.
+ */
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct cppi *cppi;
+
+ cppi = container_of(c, struct cppi, controller);
+
+ /* assert: caller stopped the controller first */
+ dma_pool_destroy(cppi->pool);
+
+ kfree(cppi);
+}
+
+/*
+ * Context: controller irqlocked, endpoint selected
+ */
+static int cppi_channel_abort(struct dma_channel *channel)
+{
+ struct cppi_channel *cppi_ch;
+ struct cppi *controller;
+ void __iomem *mbase;
+ void __iomem *tibase;
+ void __iomem *regs;
+ u32 value;
+ struct cppi_descriptor *queue;
+
+ cppi_ch = container_of(channel, struct cppi_channel, channel);
+
+ controller = cppi_ch->controller;
+
+ switch (channel->status) {
+ case MUSB_DMA_STATUS_BUS_ABORT:
+ case MUSB_DMA_STATUS_CORE_ABORT:
+ /* from RX or TX fault irq handler */
+ case MUSB_DMA_STATUS_BUSY:
+ /* the hardware needs shutting down */
+ regs = cppi_ch->hw_ep->regs;
+ break;
+ case MUSB_DMA_STATUS_UNKNOWN:
+ case MUSB_DMA_STATUS_FREE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ if (!cppi_ch->transmit && cppi_ch->head)
+ cppi_dump_rxq(3, "/abort", cppi_ch);
+
+ mbase = controller->mregs;
+ tibase = controller->tibase;
+
+ queue = cppi_ch->head;
+ cppi_ch->head = NULL;
+ cppi_ch->tail = NULL;
+
+ /* REVISIT should rely on caller having done this,
+ * and caller should rely on us not changing it.
+ * peripheral code is safe ... check host too.
+ */
+ musb_ep_select(mbase, cppi_ch->index + 1);
+
+ if (cppi_ch->transmit) {
+ struct cppi_tx_stateram __iomem *tx_ram;
+ int enabled;
+
+ /* mask interrupts raised to signal teardown complete. */
+ enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
+ & (1 << cppi_ch->index);
+ if (enabled)
+ musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+ (1 << cppi_ch->index));
+
+ /* REVISIT put timeouts on these controller handshakes */
+
+ cppi_dump_tx(6, cppi_ch, " (teardown)");
+
+ /* teardown DMA engine then usb core */
+ do {
+ value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
+ } while (!(value & CPPI_TEAR_READY));
+ musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
+
+ tx_ram = cppi_ch->state_ram;
+ do {
+ value = musb_readl(&tx_ram->tx_complete, 0);
+ } while (0xFFFFFFFC != value);
+ musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
+
+ /* FIXME clean up the transfer state ... here?
+ * the completion routine should get called with
+ * an appropriate status code.
+ */
+
+ value = musb_readw(regs, MUSB_TXCSR);
+ value &= ~MUSB_TXCSR_DMAENAB;
+ value |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(regs, MUSB_TXCSR, value);
+ musb_writew(regs, MUSB_TXCSR, value);
+
+ /* re-enable interrupt */
+ if (enabled)
+ musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+ (1 << cppi_ch->index));
+
+ /* While we scrub the TX state RAM, ensure that we clean
+ * up any interrupt that's currently asserted:
+		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
+		 *    (write back mode)
+		 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
+		 *    (compare mode)
+		 * Value written is compared (for bits 31:2) and when
+ * equal, interrupt is deasserted.
+ */
+ cppi_reset_tx(tx_ram, 1);
+ musb_writel(&tx_ram->tx_complete, 0, 0);
+
+ cppi_dump_tx(5, cppi_ch, " (done teardown)");
+
+ /* REVISIT tx side _should_ clean up the same way
+ * as the RX side ... this does no cleanup at all!
+ */
+
+ } else /* RX */ {
+ u16 csr;
+
+ /* NOTE: docs don't guarantee any of this works ... we
+ * expect that if the usb core stops telling the cppi core
+ * to pull more data from it, then it'll be safe to flush
+ * current RX DMA state iff any pending fifo transfer is done.
+ */
+
+ core_rxirq_disable(tibase, cppi_ch->index + 1);
+
+ /* for host, ensure ReqPkt is never set again */
+ if (is_host_active(cppi_ch->controller->musb)) {
+ value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ value &= ~((0x3) << (cppi_ch->index * 2));
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
+ }
+
+ csr = musb_readw(regs, MUSB_RXCSR);
+
+ /* for host, clear (just) ReqPkt at end of current packet(s) */
+ if (is_host_active(cppi_ch->controller->musb)) {
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ csr &= ~MUSB_RXCSR_H_REQPKT;
+ } else
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+
+ /* clear dma enable */
+ csr &= ~(MUSB_RXCSR_DMAENAB);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ csr = musb_readw(regs, MUSB_RXCSR);
+
+ /* Quiesce: wait for current dma to finish (if not cleanup).
+ * We can't use bit zero of stateram->rx_sop, since that
+ * refers to an entire "DMA packet" not just emptying the
+ * current fifo. Most segments need multiple usb packets.
+ */
+ if (channel->status == MUSB_DMA_STATUS_BUSY)
+ udelay(50);
+
+ /* scan the current list, reporting any data that was
+ * transferred and acking any IRQ
+ */
+ cppi_rx_scan(controller, cppi_ch->index);
+
+ /* clobber the existing state once it's idle
+ *
+ * NOTE: arguably, we should also wait for all the other
+ * RX channels to quiesce (how??) and then temporarily
+ * disable RXCPPI_CTRL_REG ... but it seems that we can
+ * rely on the controller restarting from state ram, with
+ * only RXCPPI_BUFCNT state being bogus. BUFCNT will
+ * correct itself after the next DMA transfer though.
+ *
+ * REVISIT does using rndis mode change that?
+ */
+ cppi_reset_rx(cppi_ch->state_ram);
+
+ /* next DMA request _should_ load cppi head ptr */
+
+ /* ... we don't "free" that list, only mutate it in place. */
+ cppi_dump_rx(5, cppi_ch, " (done abort)");
+
+ /* clean up previously pending bds */
+ cppi_bd_free(cppi_ch, cppi_ch->last_processed);
+ cppi_ch->last_processed = NULL;
+
+ while (queue) {
+ struct cppi_descriptor *tmp = queue->next;
+
+ cppi_bd_free(cppi_ch, queue);
+ queue = tmp;
+ }
+ }
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->buf_dma = 0;
+ cppi_ch->offset = 0;
+ cppi_ch->buf_len = 0;
+ cppi_ch->maxpacket = 0;
+ return 0;
+}
+
+/* TBD Queries:
+ *
+ * Power Management ... probably turn off cppi during suspend, restart;
+ * check state ram? Clocking is presumably shared with usb core.
+ */
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
new file mode 100644
index 000000000000..fc5216b5d2c5
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.h
@@ -0,0 +1,133 @@
+/* Copyright (C) 2005-2006 by Texas Instruments */
+
+#ifndef _CPPI_DMA_H_
+#define _CPPI_DMA_H_
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/dmapool.h>
+
+#include "musb_dma.h"
+#include "musb_core.h"
+
+
+/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
+ * would seem to be shared with the TUSB6020 (over VLYNQ).
+ */
+
+#include "davinci.h"
+
+
+/* CPPI RX/TX state RAM */
+
+struct cppi_tx_stateram {
+ u32 tx_head; /* "DMA packet" head descriptor */
+ u32 tx_buf;
+ u32 tx_current; /* current descriptor */
+ u32 tx_buf_current;
+ u32 tx_info; /* flags, remaining buflen */
+ u32 tx_rem_len;
+ u32 tx_dummy; /* unused */
+ u32 tx_complete;
+};
+
+struct cppi_rx_stateram {
+ u32 rx_skipbytes;
+ u32 rx_head;
+ u32 rx_sop; /* "DMA packet" head descriptor */
+ u32 rx_current; /* current descriptor */
+ u32 rx_buf_current;
+ u32 rx_len_len;
+ u32 rx_cnt_cnt;
+ u32 rx_complete;
+};
+
+/* hw_options bits in CPPI buffer descriptors */
+#define CPPI_SOP_SET ((u32)(1 << 31))
+#define CPPI_EOP_SET ((u32)(1 << 30))
+#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */
+#define CPPI_EOQ_MASK ((u32)(1 << 28))
+#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */
+#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */
+
+#define CPPI_RECV_PKTLEN_MASK 0xFFFF
+#define CPPI_BUFFER_LEN_MASK 0xFFFF
+
+#define CPPI_TEAR_READY ((u32)(1 << 31))
+
+/* CPPI data structure definitions */
+
+#define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */
+
+struct cppi_descriptor {
+ /* hardware overlay */
+ u32 hw_next; /* next buffer descriptor Pointer */
+ u32 hw_bufp; /* i/o buffer pointer */
+ u32 hw_off_len; /* buffer_offset16, buffer_length16 */
+ u32 hw_options; /* flags: SOP, EOP etc*/
+
+ struct cppi_descriptor *next;
+ dma_addr_t dma; /* address of this descriptor */
+ u32 buflen; /* for RX: original buffer length */
+} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
+
+
+struct cppi;
+
+/* CPPI Channel Control structure */
+struct cppi_channel {
+ struct dma_channel channel;
+
+ /* back pointer to the DMA controller structure */
+ struct cppi *controller;
+
+ /* which direction of which endpoint? */
+ struct musb_hw_ep *hw_ep;
+ bool transmit;
+ u8 index;
+
+ /* DMA modes: RNDIS or "transparent" */
+ u8 is_rndis;
+
+ /* book keeping for current transfer request */
+ dma_addr_t buf_dma;
+ u32 buf_len;
+ u32 maxpacket;
+ u32 offset; /* dma requested */
+
+ void __iomem *state_ram; /* CPPI state */
+
+ struct cppi_descriptor *freelist;
+
+ /* BD management fields */
+ struct cppi_descriptor *head;
+ struct cppi_descriptor *tail;
+ struct cppi_descriptor *last_processed;
+
+ /* use tx_complete in host role to track endpoints waiting for
+ * FIFONOTEMPTY to clear.
+ */
+ struct list_head tx_complete;
+};
+
+/* CPPI DMA controller object */
+struct cppi {
+ struct dma_controller controller;
+ struct musb *musb;
+ void __iomem *mregs; /* Mentor regs */
+ void __iomem *tibase; /* TI/CPPI regs */
+
+ struct cppi_channel tx[MUSB_C_NUM_EPT - 1];
+ struct cppi_channel rx[MUSB_C_NUM_EPR - 1];
+
+ struct dma_pool *pool;
+
+ struct list_head tx_complete;
+};
+
+/* irq handling hook */
+extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+
+#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
new file mode 100644
index 000000000000..75baf181a8cd
--- /dev/null
+++ b/drivers/usb/musb/davinci.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/arch/gpio.h>
+#include <asm/mach-types.h>
+
+#include "musb_core.h"
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#include <asm/arch/i2c-client.h>
+#endif
+
+#include "davinci.h"
+#include "cppi_dma.h"
+
+
+/* REVISIT (PM) we should be able to keep the PHY in low power mode most
+ * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
+ * and, when in host mode, autosuspending idle root ports... PHYPLLON
+ * (overriding SUSPENDM?) then likely needs to stay off.
+ */
+
+static inline void phy_on(void)
+{
+ /* start the on-chip PHY and its PLL */
+ __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
+ (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
+ while ((__raw_readl((void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR))
+ & USBPHY_PHYCLKGD) == 0)
+ cpu_relax();
+}
+
+static inline void phy_off(void)
+{
+ /* powerdown the on-chip PHY and its oscillator */
+ __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR));
+}
+
+static int dma_off = 1;
+
+void musb_platform_enable(struct musb *musb)
+{
+ u32 tmp, old, val;
+
+ /* workaround: setup irqs through both register sets */
+ tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
+ << DAVINCI_USB_TXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ old = tmp;
+ tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
+ << DAVINCI_USB_RXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ tmp |= old;
+
+ val = ~MUSB_INTR_SOF;
+ tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+
+ if (is_dma_capable() && !dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __func__);
+ else
+ dma_off = 0;
+
+ /* force a DRVVBUS irq so we can start polling for ID change */
+ if (is_otg_enabled(musb))
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
+}
+
+/*
+ * Disable the HDRC and flush interrupts
+ */
+void musb_platform_disable(struct musb *musb)
+{
+ /* because we don't set CTRLR.UINT, "important" to:
+ * - not read/write INTRUSB/INTRUSBE
+ * - (except during initial setup, as workaround)
+ * - use INTSETR/INTCLRR instead
+ */
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
+ DAVINCI_USB_USBINT_MASK
+ | DAVINCI_USB_TXINT_MASK
+ | DAVINCI_USB_RXINT_MASK);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
+
+ if (is_dma_capable() && !dma_off)
+ WARNING("dma still active\n");
+}
+
+
+/* REVISIT it's not clear whether DaVinci can support full OTG. */
+
+static int vbus_state = -1;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#define portstate(stmt) stmt
+#else
+#define portstate(stmt)
+#endif
+
+
+/* VBUS SWITCHING IS BOARD-SPECIFIC */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#ifndef CONFIG_MACH_DAVINCI_EVM_OTG
+
+/* I2C operations are always synchronous, and require a task context.
+ * With unloaded systems, using the shared workqueue seems to suffice
+ * to satisfy the 100msec A_WAIT_VRISE timeout...
+ */
+static void evm_deferred_drvvbus(struct work_struct *ignored)
+{
+ davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state);
+ vbus_state = !vbus_state;
+}
+static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
+
+#endif /* modified board */
+#endif /* EVM */
+
+static void davinci_source_power(struct musb *musb, int is_on, int immediate)
+{
+ if (is_on)
+ is_on = 1;
+
+ if (vbus_state == is_on)
+ return;
+ vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+ if (machine_is_davinci_evm()) {
+#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
+ /* modified EVM board switching VBUS with GPIO(6) not I2C
+ * NOTE: PINMUX0.RGB888 (bit23) must be clear
+ */
+ if (is_on)
+ gpio_set(GPIO(6));
+ else
+ gpio_clear(GPIO(6));
+ immediate = 1;
+#else
+ if (immediate)
+ davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
+ else
+ schedule_work(&evm_vbus_work);
+#endif
+ }
+#endif
+ if (immediate)
+ vbus_state = is_on;
+}
+
+static void davinci_set_vbus(struct musb *musb, int is_on)
+{
+ WARN_ON(is_on && is_peripheral_active(musb));
+ davinci_source_power(musb, is_on, 0);
+}
+
+
+#define POLL_SECONDS 2
+
+static struct timer_list otg_workaround;
+
+static void otg_timer(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+	/* We poll because DaVinci won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VFALL:
+ /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
+ * seems to mis-handle session "start" otherwise (or in our
+ * case "recover"), in routine "VBUS was valid by the time
+ * VBUSERR got reported during enumeration" cases.
+ */
+ if (devctl & MUSB_DEVCTL_VBUS) {
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ break;
+ }
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
+ break;
+ case OTG_STATE_B_IDLE:
+ if (!is_peripheral_enabled(musb))
+ break;
+
+ /* There's no ID-changed IRQ, so we have no good way to tell
+ * when to switch to the A-Default state machine (by setting
+ * the DEVCTL.SESSION flag).
+ *
+ * Workaround: whenever we're in B_IDLE, try setting the
+ * session flag every few seconds. If it works, ID was
+ * grounded and we're now in the A-Default state machine.
+ *
+ * NOTE setting the session flag is _supposed_ to trigger
+ * SRP, but clearly it doesn't.
+ */
+ musb_writeb(mregs, MUSB_DEVCTL,
+ devctl | MUSB_DEVCTL_SESSION);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static irqreturn_t davinci_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+ void __iomem *tibase = musb->ctrl_base;
+ u32 tmp;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
+ * the Mentor registers (except for setup), use the TI ones and EOI.
+ *
+	 * Docs describe irq "vector" registers associated with the CPPI and
+ * USB EOI registers. These hold a bitmask corresponding to the
+ * current IRQ, not an irq handler address. Would using those bits
+ * resolve some of the races observed in this dispatch code??
+ */
+
+ /* CPPI interrupts share the same IRQ line, but have their own
+ * mask, state, "vector", and EOI registers.
+ */
+ if (is_cppi_enabled()) {
+ u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+ u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+ if (cppi_tx || cppi_rx) {
+ DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
+ cppi_completion(musb, cppi_rx, cppi_tx);
+ retval = IRQ_HANDLED;
+ }
+ }
+
+ /* ack and handle non-CPPI interrupts */
+ tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
+ musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
+ DBG(4, "IRQ %08x\n", tmp);
+
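+	/* split the combined status word into the Mentor-style rx/tx/usb
+	 * interrupt fields
+	 */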
+ musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
+ >> DAVINCI_USB_RXINT_SHIFT;
+ musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
+ >> DAVINCI_USB_TXINT_SHIFT;
+ musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
+ >> DAVINCI_USB_USBINT_SHIFT;
+
+ /* DRVVBUS irqs are the only proxy we have (a very poor one!) for
+ * DaVinci's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.SESSION per Mentor docs requires we know its
+ * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+ * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+ */
+ if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
+ int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+ int err = musb->int_usb & MUSB_INTR_VBUSERROR;
+
+ err = is_host_enabled(musb)
+ && (musb->int_usb & MUSB_INTR_VBUSERROR);
+ if (err) {
+ /* The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"starting sessions
+ * without waiting (on EVM, a **long** time) for VBUS
+ * to stop registering in devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (is_host_enabled(musb) && drvvbus) {
+ musb->is_active = 1;
+ MUSB_HST_MODE(musb);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ del_timer(&otg_workaround);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+
+ /* NOTE: this must complete poweron within 100 msec */
+ davinci_source_power(musb, drvvbus, 0);
+ DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ otg_state_string(musb),
+ err ? " ERROR" : "",
+ devctl);
+ retval = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ retval |= musb_interrupt(musb);
+
+ /* irq stays asserted until EOI is written */
+ musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
+
+ /* poll for ID change */
+ if (is_otg_enabled(musb)
+ && musb->xceiv.state == OTG_STATE_B_IDLE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* REVISIT we sometimes get unhandled IRQs
+	 * (e.g. ep0); not clear why...
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "unhandled? %08x\n", tmp);
+ return IRQ_HANDLED;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ void __iomem *tibase = musb->ctrl_base;
+ u32 revision;
+
+ musb->mregs += DAVINCI_BASE_OFFSET;
+#if 0
+ /* REVISIT there's something odd about clocking, this
+	 * didn't appear to do the job ...
+ */
+ musb->clock = clk_get(pDevice, "usb");
+ if (IS_ERR(musb->clock))
+ return PTR_ERR(musb->clock);
+
+ status = clk_enable(musb->clock);
+ if (status < 0)
+ return -ENODEV;
+#endif
+
+ /* returns zero if e.g. not clocked */
+ revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
+ if (revision == 0)
+ return -ENODEV;
+
+ if (is_host_enabled(musb))
+ setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+
+ musb->board_set_vbus = davinci_set_vbus;
+ davinci_source_power(musb, 0, 1);
+
+ /* reset the controller */
+ musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
+
+ /* start the on-chip PHY and its PLL */
+ phy_on();
+
+ msleep(5);
+
+ /* NOTE: irqs are in mixed mode, not bypass to pure-musb */
+ pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
+ revision, __raw_readl((void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR)),
+ musb_readb(tibase, DAVINCI_USB_CTRL_REG));
+
+ musb->isr = davinci_interrupt;
+ return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+ if (is_host_enabled(musb))
+ del_timer_sync(&otg_workaround);
+
+ davinci_source_power(musb, 0 /*off*/, 1);
+
+ /* delay, to avoid problems with module reload */
+ if (is_host_enabled(musb) && musb->xceiv.default_a) {
+ int maxdelay = 30;
+ u8 devctl, warn = 0;
+
+ /* if there's no peripheral connected, this can take a
+ * long time to fall, especially on EVM with huge C133.
+ */
+ do {
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (!(devctl & MUSB_DEVCTL_VBUS))
+ break;
+ if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
+ warn = devctl & MUSB_DEVCTL_VBUS;
+ DBG(1, "VBUS %d\n",
+ warn >> MUSB_DEVCTL_VBUS_SHIFT);
+ }
+ msleep(1000);
+ maxdelay--;
+ } while (maxdelay > 0);
+
+ /* in OTG mode, another host might be connected */
+ if (devctl & MUSB_DEVCTL_VBUS)
+ DBG(1, "VBUS off timeout (devctl %02x)\n", devctl);
+ }
+
+ phy_off();
+ return 0;
+}
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
new file mode 100644
index 000000000000..7fb6238e270f
--- /dev/null
+++ b/drivers/usb/musb/davinci.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_HDRDF_H__
+#define __MUSB_HDRDF_H__
+
+/*
+ * DaVinci-specific definitions
+ */
+
+/* Integrated highspeed/otg PHY */
+#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_PHYCLKGD (1 << 8)
+#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */
+#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */
+#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */
+#define USBPHY_CLKO1SEL (1 << 3)
+#define USBPHY_OSCPDWN (1 << 2)
+#define USBPHY_PHYPDWN (1 << 0)
+
+/* For now include usb OTG module registers here */
+#define DAVINCI_USB_VERSION_REG 0x00
+#define DAVINCI_USB_CTRL_REG 0x04
+#define DAVINCI_USB_STAT_REG 0x08
+#define DAVINCI_RNDIS_REG 0x10
+#define DAVINCI_AUTOREQ_REG 0x14
+#define DAVINCI_USB_INT_SOURCE_REG 0x20
+#define DAVINCI_USB_INT_SET_REG 0x24
+#define DAVINCI_USB_INT_SRC_CLR_REG 0x28
+#define DAVINCI_USB_INT_MASK_REG 0x2c
+#define DAVINCI_USB_INT_MASK_SET_REG 0x30
+#define DAVINCI_USB_INT_MASK_CLR_REG 0x34
+#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38
+#define DAVINCI_USB_EOI_REG 0x3c
+#define DAVINCI_USB_EOI_INTVEC 0x40
+
+/* BEGIN CPPI-generic (?) */
+
+/* CPPI related registers */
+#define DAVINCI_TXCPPI_CTRL_REG 0x80
+#define DAVINCI_TXCPPI_TEAR_REG 0x84
+#define DAVINCI_CPPI_EOI_REG 0x88
+#define DAVINCI_CPPI_INTVEC_REG 0x8c
+#define DAVINCI_TXCPPI_MASKED_REG 0x90
+#define DAVINCI_TXCPPI_RAW_REG 0x94
+#define DAVINCI_TXCPPI_INTENAB_REG 0x98
+#define DAVINCI_TXCPPI_INTCLR_REG 0x9c
+
+#define DAVINCI_RXCPPI_CTRL_REG 0xC0
+#define DAVINCI_RXCPPI_MASKED_REG 0xD0
+#define DAVINCI_RXCPPI_RAW_REG 0xD4
+#define DAVINCI_RXCPPI_INTENAB_REG 0xD8
+#define DAVINCI_RXCPPI_INTCLR_REG 0xDC
+
+#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0
+#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4
+#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8
+#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC
+
+/* CPPI state RAM entries */
+#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100
+
+#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40))
+#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40))
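+/* e.g. TX channel 0 state RAM starts at offset 0x100, RX channel 0 at 0x120 */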
+
+/* CPPI masks */
+#define DAVINCI_DMA_CTRL_ENABLE 1
+#define DAVINCI_DMA_CTRL_DISABLE 0
+
+#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF
+#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF
+
+/* END CPPI-generic (?) */
+
+#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */
+#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */
+
+#define DAVINCI_USB_USBINT_SHIFT 16
+#define DAVINCI_USB_TXINT_SHIFT 0
+#define DAVINCI_USB_RXINT_SHIFT 8
+
+#define DAVINCI_INTR_DRVVBUS 0x0100
+
+#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */
+#define DAVINCI_USB_TXINT_MASK \
+ (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
+#define DAVINCI_USB_RXINT_MASK \
+ (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)
+
+#define DAVINCI_BASE_OFFSET 0x400
+
+#endif /* __MUSB_HDRDF_H__ */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
new file mode 100644
index 000000000000..d68ec6daf335
--- /dev/null
+++ b/drivers/usb/musb/musb_core.c
@@ -0,0 +1,2261 @@
+/*
+ * MUSB OTG driver core code
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
+ *
+ * This consists of a Host Controller Driver (HCD) and a peripheral
+ * controller driver implementing the "Gadget" API; OTG support is
+ * in the works. These are normal Linux-USB controller drivers which
+ * use IRQs and have no dedicated thread.
+ *
+ * This version of the driver has only been used with products from
+ * Texas Instruments. Those products integrate the Inventra logic
+ * with other DMA, IRQ, and bus modules, as well as other logic that
+ * needs to be reflected in this driver.
+ *
+ *
+ * NOTE: the original Mentor code here was pretty much a collection
+ * of mechanisms that don't seem to have been fully integrated/working
+ * for *any* Linux kernel version. This version aims at Linux 2.6.now.
+ * Key open issues include:
+ *
+ * - Lack of host-side transaction scheduling, for all transfer types.
+ * The hardware doesn't do it; instead, software must.
+ *
+ * This is not an issue for OTG devices that don't support external
+ * hubs, but for more "normal" USB hosts it's a user issue that the
+ * "multipoint" support doesn't scale in the expected ways. That
+ * includes DaVinci EVM in a common non-OTG mode.
+ *
+ * * Control and bulk use dedicated endpoints, and there's as
+ * yet no mechanism to either (a) reclaim the hardware when
+ * peripherals are NAKing, which gets complicated with bulk
+ * endpoints, or (b) use more than a single bulk endpoint in
+ * each direction.
+ *
+ * RESULT: one device may be perceived as blocking another one.
+ *
+ * * Interrupt and isochronous will dynamically allocate endpoint
+ * hardware, but (a) there's no record keeping for bandwidth;
+ * (b) in the common case that few endpoints are available, there
+ * is no mechanism to reuse endpoints to talk to multiple devices.
+ *
+ * RESULT: At one extreme, bandwidth can be overcommitted in
+ * some hardware configurations, no faults will be reported.
+ * At the other extreme, the bandwidth capabilities which do
+ * exist tend to be severely undercommitted. You can't yet hook
+ * up both a keyboard and a mouse to an external USB hub.
+ */
+
+/*
+ * This gets many kinds of configuration information:
+ * - Kconfig for everything user-configurable
+ * - <asm/arch/hdrc_cnf.h> for SOC or family details
+ * - platform_device for addressing, irq, and platform_data
+ * - platform_data is mostly for board-specific information
+ *
+ * Most of the conditional compilation will (someday) vanish.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_ARM
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/mach-types.h>
+#endif
+
+#include "musb_core.h"
+
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include "davinci.h"
+#endif
+
+
+
+#if MUSB_DEBUG > 0
+unsigned debug = MUSB_DEBUG;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "initial debug message level");
+
+#define MUSB_VERSION_SUFFIX "/dbg"
+#endif
+
+#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
+#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
+
+#define MUSB_VERSION_BASE "6.0"
+
+#ifndef MUSB_VERSION_SUFFIX
+#define MUSB_VERSION_SUFFIX ""
+#endif
+#define MUSB_VERSION MUSB_VERSION_BASE MUSB_VERSION_SUFFIX
+
+#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
+
+#define MUSB_DRIVER_NAME "musb_hdrc"
+const char musb_driver_name[] = MUSB_DRIVER_NAME;
+
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct musb *dev_to_musb(struct device *dev)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* usbcore insists dev->driver_data is a "struct hcd *" */
+ return hcd_to_musb(dev_get_drvdata(dev));
+#else
+ return dev_get_drvdata(dev);
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifndef CONFIG_USB_TUSB6010
+/*
+ * Load an endpoint's FIFO
+ */
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ prefetch((u8 *)src);
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', hw_ep->epnum, fifo, len, src);
+
+ /* we can't assume unaligned reads work */
+ if (likely((0x01 & (unsigned long) src) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned source address */
+ if ((0x02 & (unsigned long) src) == 0) {
+ if (len >= 4) {
+ writesl(fifo, src + index, len >> 2);
+ index += len & ~0x03;
+ }
+ if (len & 0x02) {
+ musb_writew(fifo, 0, *(u16 *)&src[index]);
+ index += 2;
+ }
+ } else {
+ if (len >= 2) {
+ writesw(fifo, src + index, len >> 1);
+ index += len & ~0x01;
+ }
+ }
+ if (len & 0x01)
+ musb_writeb(fifo, 0, src[index]);
+ } else {
+ /* byte aligned */
+ writesb(fifo, src, len);
+ }
+}
+
+/*
+ * Unload an endpoint's FIFO
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', hw_ep->epnum, fifo, len, dst);
+
+ /* we can't assume unaligned writes work */
+ if (likely((0x01 & (unsigned long) dst) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) dst) == 0) {
+ if (len >= 4) {
+ readsl(fifo, dst, len >> 2);
+ index = len & ~0x03;
+ }
+ if (len & 0x02) {
+ *(u16 *)&dst[index] = musb_readw(fifo, 0);
+ index += 2;
+ }
+ } else {
+ if (len >= 2) {
+ readsw(fifo, dst, len >> 1);
+ index = len & ~0x01;
+ }
+ }
+ if (len & 0x01)
+ dst[index] = musb_readb(fifo, 0);
+ } else {
+ /* byte aligned */
+ readsb(fifo, dst, len);
+ }
+}
+
+#endif /* normal PIO */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 musb_test_packet[53] = {
+ /* implicit SYNC then DATA0 to start */
+
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+
+ /* implicit CRC16 then EOP to end */
+};
+
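+/* Write the test packet above into the EP0 FIFO and set TXPKTRDY so
+ * the core starts transmitting it (high-speed TEST_PACKET test mode).
+ */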
+void musb_load_testpacket(struct musb *musb)
+{
+ void __iomem *regs = musb->endpoints[0].regs;
+
+ musb_ep_select(musb->mregs, 0);
+ musb_write_fifo(musb->control_ep,
+ sizeof(musb_test_packet), musb_test_packet);
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
+}
+
+/*-------------------------------------------------------------------------*/
+
+const char *otg_state_string(struct musb *musb)
+{
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_IDLE: return "a_idle";
+ case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
+ case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
+ case OTG_STATE_A_HOST: return "a_host";
+ case OTG_STATE_A_SUSPEND: return "a_suspend";
+ case OTG_STATE_A_PERIPHERAL: return "a_peripheral";
+ case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall";
+ case OTG_STATE_A_VBUS_ERR: return "a_vbus_err";
+ case OTG_STATE_B_IDLE: return "b_idle";
+ case OTG_STATE_B_SRP_INIT: return "b_srp_init";
+ case OTG_STATE_B_PERIPHERAL: return "b_peripheral";
+ case OTG_STATE_B_WAIT_ACON: return "b_wait_acon";
+ case OTG_STATE_B_HOST: return "b_host";
+ default: return "UNDEFINED";
+ }
+}
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+/*
+ * See also USB_OTG_1-3.pdf 6.6.5 Timers
+ * REVISIT: Are the other timers done in the hardware?
+ */
+#define TB_ASE0_BRST 100 /* Min 3.125 ms */
+
+/*
+ * Handles OTG hnp timeouts, such as b_ase0_brst
+ */
+void musb_otg_timer_func(unsigned long data)
+{
+ struct musb *musb = (struct musb *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+ musb_g_disconnect(musb);
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
+ musb_hnp_stop(musb);
+ break;
+ default:
+ DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
+ }
+ musb->ignore_disconnect = 0;
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
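+/* one timer is shared by both HNP timeouts handled above; callers set
+ * .data to the musb instance before each mod_timer()
+ */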
+static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
+
+/*
+ * Stops the B-device HNP state. Caller must take care of locking.
+ */
+void musb_hnp_stop(struct musb *musb)
+{
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+ void __iomem *mbase = musb->mregs;
+ u8 reg;
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_PERIPHERAL:
+ case OTG_STATE_A_WAIT_VFALL:
+ case OTG_STATE_A_WAIT_BCON:
+ DBG(1, "HNP: Switching back to A-host\n");
+ musb_g_disconnect(musb);
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_B_HOST:
+ DBG(1, "HNP: Disabling HR\n");
+ hcd->self.is_b_host = 0;
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ reg = musb_readb(mbase, MUSB_POWER);
+ reg |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, reg);
+ /* REVISIT: Start SESSION_REQUEST here? */
+ break;
+ default:
+ DBG(1, "HNP: Stopping in unknown state %s\n",
+ otg_state_string(musb));
+ }
+
+ /*
+ * When returning to A state after HNP, avoid hub_port_rebounce(),
+ * which causes occasional OPT A "Did not receive reset after connect"
+ * errors.
+ */
+ musb->port1_status &=
+ ~(1 << USB_PORT_FEAT_C_CONNECTION);
+}
+
+#endif
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual.
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl DEVCTL register contents
+ * @param power POWER register contents
+ */
+
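+/* interrupts in this mask are handled by musb_stage0_irq(), before any
+ * endpoint FIFO servicing; the remaining "global" interrupts are handled
+ * afterwards by musb_stage2_irq()
+ */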
+#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \
+ | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \
+ | MUSB_INTR_RESET)
+
+static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ u8 devctl, u8 power)
+{
+ irqreturn_t handled = IRQ_NONE;
+ void __iomem *mbase = musb->mregs;
+
+ DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
+ int_usb);
+
+ /* in host mode, the peripheral may issue remote wakeup.
+ * in peripheral mode, the host may resume the link.
+ * spurious RESUME irqs happen too, paired with SUSPEND.
+ */
+ if (int_usb & MUSB_INTR_RESUME) {
+ handled = IRQ_HANDLED;
+ DBG(3, "RESUME (%s)\n", otg_state_string(musb));
+
+ if (devctl & MUSB_DEVCTL_HM) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_SUSPEND:
+ /* remote wakeup? later, GetPortStatus
+ * will stop RESUME signaling
+ */
+
+ if (power & MUSB_POWER_SUSPENDM) {
+ /* spurious */
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ DBG(2, "Spurious SUSPENDM\n");
+ break;
+ }
+
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER,
+ power | MUSB_POWER_RESUME);
+
+ musb->port1_status |=
+ (USB_PORT_STAT_C_SUSPEND << 16)
+ | MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(20);
+
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->is_active = 1;
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 1;
+ MUSB_DEV_MODE(musb);
+ break;
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+ "host",
+ otg_state_string(musb));
+ }
+#endif
+ } else {
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_SUSPEND:
+ /* possibly DISCONNECT is upcoming */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ /* disconnect while suspended? we may
+ * not get a disconnect irq...
+ */
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ != (3 << MUSB_DEVCTL_VBUS_SHIFT)
+ ) {
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+ }
+ musb_g_resume(musb);
+ break;
+ case OTG_STATE_B_IDLE:
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+#endif
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+ "peripheral",
+ otg_state_string(musb));
+ }
+ }
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* see manual for the order of the tests */
+ if (int_usb & MUSB_INTR_SESSREQ) {
+ DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
+
+ /* IRQ arrives from ID pin sense or (later, if VBUS power
+ * is removed) SRP. Responses are time critical:
+ * - turn on VBUS (with silicon-specific mechanism)
+ * - go through A_WAIT_VRISE
+ * - ... to A_WAIT_BCON.
+ * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+ */
+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ musb->ep0_stage = MUSB_EP0_START;
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ musb_set_vbus(musb, 1);
+
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_VBUSERROR) {
+ int ignore = 0;
+
+ /* During connection as an A-Device, we may see short
+ * current spikes causing a voltage drop, because of cable
+ * and peripheral capacitance combined with vbus draw.
+ * (So: less common with truly self-powered devices, where
+ * vbus doesn't act like a power supply.)
+ *
+ * Such spikes are short; usually less than ~500 usec, max
+ * of ~2 msec. That is, they're not sustained overcurrent
+ * errors, though they're reported using VBUSERROR irqs.
+ *
+ * Workarounds: (a) hardware: use self powered devices.
+ * (b) software: ignore non-repeated VBUS errors.
+ *
+ * REVISIT: do delays from lots of DEBUG_KERNEL checks
+ * make trouble here, keeping VBUS < 4.4V ?
+ */
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ /* recovery is dicey once we've gotten past the
+ * initial stages of enumeration, but if VBUS
+ * stayed ok at the other end of the link, and
+ * another reset is due (at least for high speed,
+ * to redo the chirp etc), it might work OK...
+ */
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_WAIT_VRISE:
+ if (musb->vbuserr_retry) {
+ musb->vbuserr_retry--;
+ ignore = 1;
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mbase, MUSB_DEVCTL, devctl);
+ } else {
+ musb->port1_status |=
+ (1 << USB_PORT_FEAT_OVER_CURRENT)
+ | (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ }
+ break;
+ default:
+ break;
+ }
+
+ DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+ otg_state_string(musb),
+ devctl,
+ ({ char *s;
+ switch (devctl & MUSB_DEVCTL_VBUS) {
+ case 0 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<SessEnd"; break;
+ case 1 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<AValid"; break;
+ case 2 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<VBusValid"; break;
+ /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
+ default:
+ s = "VALID"; break;
+ }; s; }),
+ VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
+ musb->port1_status);
+
+ /* go through A_WAIT_VFALL then start a new session */
+ if (!ignore)
+ musb_set_vbus(musb, 0);
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_CONNECT) {
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+
+ handled = IRQ_HANDLED;
+ musb->is_active = 1;
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+ musb->ep0_stage = MUSB_EP0_START;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ /* flush endpoints when transitioning from Device Mode */
+ if (is_peripheral_active(musb)) {
+ /* REVISIT HNP; just force disconnect */
+ }
+ musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
+ musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
+ musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
+#endif
+ musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+ |USB_PORT_STAT_HIGH_SPEED
+ |USB_PORT_STAT_ENABLE
+ );
+ musb->port1_status |= USB_PORT_STAT_CONNECTION
+ |(USB_PORT_STAT_C_CONNECTION << 16);
+
+ /* high vs full speed is just a guess until after reset */
+ if (devctl & MUSB_DEVCTL_LSDEV)
+ musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+ if (hcd->status_urb)
+ usb_hcd_poll_rh_status(hcd);
+ else
+ usb_hcd_resume_root_hub(hcd);
+
+ MUSB_HST_MODE(musb);
+
+ /* indicate new connection to OTG machine */
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_PERIPHERAL:
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
+ musb->xceiv.state = OTG_STATE_B_HOST;
+ hcd->self.is_b_host = 1;
+ int_usb &= ~MUSB_INTR_SUSPEND;
+ } else
+ DBG(1, "CONNECT as b_peripheral???\n");
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: Waiting to switch to b_host state\n");
+ musb->xceiv.state = OTG_STATE_B_HOST;
+ hcd->self.is_b_host = 1;
+ break;
+ default:
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ hcd->self.is_b_host = 0;
+ }
+ break;
+ }
+ DBG(1, "CONNECT (%s) devctl %02x\n",
+ otg_state_string(musb), devctl);
+ }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+ /* Mentor saves a bit: bus reset and babble share the same irq.
+ * only host sees babble; only peripheral sees bus reset.
+ */
+ if (int_usb & MUSB_INTR_RESET) {
+ if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
+ /*
+ * Looks like non-HS BABBLE can be ignored, but
+ * HS BABBLE is an error condition. For HS the solution
+ * is to avoid babble in the first place and fix what
+ * caused BABBLE. When HS BABBLE happens we can only
+ * stop the session.
+ */
+ if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
+ DBG(1, "BABBLE devctl: %02x\n", devctl);
+ else {
+ ERR("Stopping host session -- babble\n");
+ musb_writeb(mbase, MUSB_DEVCTL, 0);
+ }
+ } else if (is_peripheral_capable()) {
+ DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_OTG
+ case OTG_STATE_A_SUSPEND:
+ /* We need to ignore disconnect on suspend
+ * otherwise tusb 2.0 won't reconnect after a
+ * power cycle, which breaks otg compliance.
+ */
+ musb->ignore_disconnect = 1;
+ musb_g_reset(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
+ DBG(1, "HNP: Setting timer as %s\n",
+ otg_state_string(musb));
+ musb_otg_timer.data = (unsigned long)musb;
+ mod_timer(&musb_otg_timer, jiffies
+ + msecs_to_jiffies(100));
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: RESET (%s), to b_peripheral\n",
+ otg_state_string(musb));
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb_g_reset(musb);
+ break;
+#endif
+ case OTG_STATE_B_IDLE:
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ /* FALLTHROUGH */
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_reset(musb);
+ break;
+ default:
+ DBG(1, "Unhandled BUS RESET as %s\n",
+ otg_state_string(musb));
+ }
+ }
+
+ handled = IRQ_HANDLED;
+ }
+ schedule_work(&musb->irq_work);
+
+ return handled;
+}
+
+/*
+ * Interrupt Service Routine for the second stage of "global" USB
+ * interrupts: disconnect, suspend, and (optionally) SOF. These are
+ * handled after the endpoint FIFOs have been serviced.
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl DEVCTL register contents
+ * @param power POWER register contents
+ */
+static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
+ u8 devctl, u8 power)
+{
+ irqreturn_t handled = IRQ_NONE;
+
+#if 0
+/* REVISIT ... this would be for multiplexing periodic endpoints, or
+ * supporting transfer phasing to prevent exceeding ISO bandwidth
+ * limits of a given frame or microframe.
+ *
+ * It's not needed for peripheral side, which dedicates endpoints;
+ * though it _might_ use SOF irqs for other purposes.
+ *
+ * And it's not currently needed for host side, which also dedicates
+ * endpoints, relies on TX/RX interval registers, and isn't claimed
+ * to support ISO transfers yet.
+ */
+ if (int_usb & MUSB_INTR_SOF) {
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *ep;
+ u8 epnum;
+ u16 frame;
+
+ DBG(6, "START_OF_FRAME\n");
+ handled = IRQ_HANDLED;
+
+ /* start any periodic Tx transfers waiting for current frame */
+ frame = musb_readw(mbase, MUSB_FRAME);
+ ep = musb->endpoints;
+ for (epnum = 1; (epnum < musb->nr_endpoints)
+ && (musb->epmask >= (1 << epnum));
+ epnum++, ep++) {
+ /*
+ * FIXME handle framecounter wraps (12 bits)
+ * eliminate duplicated StartUrb logic
+ */
+ if (ep->dwWaitFrame >= frame) {
+ ep->dwWaitFrame = 0;
+ pr_debug("SOF --> periodic TX%s on %d\n",
+ ep->tx_channel ? " DMA" : "",
+ epnum);
+ if (!ep->tx_channel)
+ musb_h_tx_start(musb, epnum);
+ else
+ cppi_hostdma_start(musb, epnum);
+ }
+ } /* end of for loop */
+ }
+#endif
+
+ if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
+ DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
+ otg_state_string(musb),
+ MUSB_MODE(musb), devctl);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ musb_root_disconnect(musb);
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+#endif /* HOST */
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_B_HOST:
+ musb_hnp_stop(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ musb_root_disconnect(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_B_WAIT_ACON:
+ /* FALLTHROUGH */
+#endif /* OTG */
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb_g_disconnect(musb);
+ break;
+#endif /* GADGET */
+ default:
+ WARNING("unhandled DISCONNECT transition (%s)\n",
+ otg_state_string(musb));
+ break;
+ }
+
+ schedule_work(&musb->irq_work);
+ }
+
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
+ otg_state_string(musb), devctl, power);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_A_PERIPHERAL:
+ /*
+ * We cannot stop HNP here, devctl BDEVICE might be
+ * still set.
+ */
+ break;
+#endif
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.gadget->b_hnp_enable;
+ if (musb->is_active) {
+#ifdef CONFIG_USB_MUSB_OTG
+ musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+ DBG(1, "HNP: Setting timer for b_ase0_brst\n");
+ musb_otg_timer.data = (unsigned long)musb;
+ mod_timer(&musb_otg_timer, jiffies
+ + msecs_to_jiffies(TB_ASE0_BRST));
+#endif
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv.state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ DBG(1, "REVISIT: SUSPEND as B_HOST\n");
+ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+ break;
+ }
+ schedule_work(&musb->irq_work);
+ }
+
+
+ return handled;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+ void __iomem *regs = musb->mregs;
+ u8 devctl = musb_readb(regs, MUSB_DEVCTL);
+
+ DBG(2, "<== devctl %02x\n", devctl);
+
+ /* Set INT enable registers, enable interrupts */
+ musb_writew(regs, MUSB_INTRTXE, musb->epmask);
+ musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
+ musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+ musb_writeb(regs, MUSB_TESTMODE, 0);
+
+ /* put into basic highspeed mode and start session */
+ musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+ | MUSB_POWER_SOFTCONN
+ | MUSB_POWER_HSENAB
+ /* ENSUSPEND wedges tusb */
+ /* | MUSB_POWER_ENSUSPEND */
+ );
+
+ musb->is_active = 0;
+ devctl = musb_readb(regs, MUSB_DEVCTL);
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ if (is_otg_enabled(musb)) {
+ /* session started after:
+ * (a) ID-grounded irq, host mode;
+ * (b) vbus present/connect IRQ, peripheral mode;
+ * (c) peripheral initiates, using SRP
+ */
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->is_active = 1;
+ else
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ } else if (is_host_enabled(musb)) {
+ /* assume ID pin is hard-wired to ground */
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ } else /* peripheral is enabled */ {
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->is_active = 1;
+ }
+ musb_platform_enable(musb);
+ musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
+
+static void musb_generic_disable(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+ u16 temp;
+
+ /* disable interrupts */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0);
+ musb_writew(mbase, MUSB_INTRTXE, 0);
+ musb_writew(mbase, MUSB_INTRRXE, 0);
+
+ /* off */
+ musb_writeb(mbase, MUSB_DEVCTL, 0);
+
+ /* flush pending interrupts */
+ temp = musb_readb(mbase, MUSB_INTRUSB);
+ temp = musb_readw(mbase, MUSB_INTRTX);
+ temp = musb_readw(mbase, MUSB_INTRRX);
+
+}
+
+/*
+ * Make the HDRC stop (disable interrupts, etc.);
+ * reversible by musb_start
+ * called on gadget driver unregister
+ * with controller locked, irqs blocked
+ * acts as a NOP unless some role activated the hardware
+ */
+void musb_stop(struct musb *musb)
+{
+ /* stop IRQs, timers, ... */
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ DBG(3, "HDRC disabled\n");
+
+ /* FIXME
+ * - mark host and/or peripheral drivers unusable/inactive
+ * - disable DMA (and enable it in HdrcStart)
+ * - make sure we can musb_start() after musb_stop(); with
+ * OTG mode, gadget driver module rmmod/modprobe cycles that
+ * - ...
+ */
+ musb_platform_try_idle(musb, 0);
+}
+
+static void musb_shutdown(struct platform_device *pdev)
+{
+ struct musb *musb = dev_to_musb(&pdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ if (musb->clock) {
+ clk_put(musb->clock);
+ musb->clock = NULL;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* FIXME power down */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The silicon either has hard-wired endpoint configurations, or else
+ * "dynamic fifo" sizing. The driver has support for both, though at this
+ * writing only the dynamic sizing is very well tested. We use normal
+ * idioms so that both modes are compile-tested, but dead code elimination
+ * leaves only the relevant one in the object file.
+ *
+ * We don't currently use dynamic fifo setup capability to do anything
+ * more than selecting one of a bunch of predefined configurations.
+ */
+#if defined(CONFIG_USB_TUSB6010) || \
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+static ushort __initdata fifo_mode = 4;
+#else
+static ushort __initdata fifo_mode = 2;
+#endif
+
+/* "modprobe ... fifo_mode=1" etc */
+module_param(fifo_mode, ushort, 0);
+MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
+
+
+enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
+enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
+
+struct fifo_cfg {
+ u8 hw_ep_num;
+ enum fifo_style style;
+ enum buf_mode mode;
+ u16 maxpacket;
+};
+
+/*
+ * tables defining fifo_mode values. define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+/* mode 0 - fits in 2KB */
+static struct fifo_cfg __initdata mode_0_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 1 - fits in 4KB */
+static struct fifo_cfg __initdata mode_1_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 2 - fits in 4KB */
+static struct fifo_cfg __initdata mode_2_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 3 - fits in 4KB */
+static struct fifo_cfg __initdata mode_3_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 4 - fits in 16KB */
+static struct fifo_cfg __initdata mode_4_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
+
+
+/*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
+static int __init
+fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
+ const struct fifo_cfg *cfg, u16 offset)
+{
+ void __iomem *mbase = musb->mregs;
+ int size = 0;
+ u16 maxpacket = cfg->maxpacket;
+ u16 c_off = offset >> 3;
+ u8 c_size;
+
+ /* expect hw_ep has already been zero-initialized */
+
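+ /* the fifo_cfg tables use power-of-two maxpacket values, so this is
+ * just log2(maxpacket), clamped to a minimum of 8 bytes
+ */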
+ size = ffs(max(maxpacket, (u16) 8)) - 1;
+ maxpacket = 1 << size;
+
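+ /* TXFIFOSZ/RXFIFOSZ encode the size as 2^(SZ+3) bytes; TXFIFOADD and
+ * RXFIFOADD take the start offset in units of 8 bytes
+ */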
+ c_size = size - 3;
+ if (cfg->mode == BUF_DOUBLE) {
+ if ((offset + (maxpacket << 1)) >
+ (1 << (musb->config->ram_bits + 2)))
+ return -EMSGSIZE;
+ c_size |= MUSB_FIFOSZ_DPB;
+ } else {
+ if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
+ return -EMSGSIZE;
+ }
+
+ /* configure the FIFO */
+ musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* EP0 reserved endpoint for control, bidirectional;
+ * EP1 reserved for bulk, two unidirectional halves.
+ */
+ if (hw_ep->epnum == 1)
+ musb->bulk_ep = hw_ep;
+ /* REVISIT error check: be sure ep0 can both rx and tx ... */
+#endif
+ switch (cfg->style) {
+ case FIFO_TX:
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_tx = maxpacket;
+ break;
+ case FIFO_RX:
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_rx = maxpacket;
+ break;
+ case FIFO_RXTX:
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_rx = maxpacket;
+
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
+ hw_ep->max_packet_sz_tx = maxpacket;
+
+ hw_ep->is_shared_fifo = true;
+ break;
+ }
+
+ /* NOTE rx and tx endpoint irqs aren't managed separately,
+ * which happens to be ok
+ */
+ musb->epmask |= (1 << hw_ep->epnum);
+
+ return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
+}
+
+static struct fifo_cfg __initdata ep0_cfg = {
+ .style = FIFO_RXTX, .maxpacket = 64,
+};
+
+static int __init ep_config_from_table(struct musb *musb)
+{
+ const struct fifo_cfg *cfg;
+ unsigned i, n;
+ int offset;
+ struct musb_hw_ep *hw_ep = musb->endpoints;
+
+ switch (fifo_mode) {
+ default:
+ fifo_mode = 0;
+ /* FALLTHROUGH */
+ case 0:
+ cfg = mode_0_cfg;
+ n = ARRAY_SIZE(mode_0_cfg);
+ break;
+ case 1:
+ cfg = mode_1_cfg;
+ n = ARRAY_SIZE(mode_1_cfg);
+ break;
+ case 2:
+ cfg = mode_2_cfg;
+ n = ARRAY_SIZE(mode_2_cfg);
+ break;
+ case 3:
+ cfg = mode_3_cfg;
+ n = ARRAY_SIZE(mode_3_cfg);
+ break;
+ case 4:
+ cfg = mode_4_cfg;
+ n = ARRAY_SIZE(mode_4_cfg);
+ break;
+ }
+
+ printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
+ musb_driver_name, fifo_mode);
+
+
+ offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
+ /* assert(offset > 0) */
+
+ /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
+ * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
+ */
+
+ for (i = 0; i < n; i++) {
+ u8 epn = cfg->hw_ep_num;
+
+ if (epn >= musb->config->num_eps) {
+ pr_debug("%s: invalid ep %d\n",
+ musb_driver_name, epn);
+ continue;
+ }
+ offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
+ if (offset < 0) {
+ pr_debug("%s: mem overrun, ep %d\n",
+ musb_driver_name, epn);
+ return -EINVAL;
+ }
+ epn++;
+ musb->nr_endpoints = max(epn, musb->nr_endpoints);
+ }
+
+ printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
+ musb_driver_name,
+ n + 1, musb->config->num_eps * 2 - 1,
+ offset, (1 << (musb->config->ram_bits + 2)));
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+#endif
+
+ return 0;
+}
+
+
+/*
+ * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
+ * @param musb the controller
+ */
+static int __init ep_config_from_hw(struct musb *musb)
+{
+ u8 epnum = 0, reg;
+ struct musb_hw_ep *hw_ep;
+ void *mbase = musb->mregs;
+
+ DBG(2, "<== static silicon ep config\n");
+
+ /* FIXME pick up ep0 maxpacket size */
+
+ for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
+ musb_ep_select(mbase, epnum);
+ hw_ep = musb->endpoints + epnum;
+
+ /* read from core using indexed model */
+ reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+ if (!reg) {
+ /* 0's returned when no more endpoints */
+ break;
+ }
+ musb->nr_endpoints++;
+ musb->epmask |= (1 << epnum);
+
+ hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
+
+ /* shared TX/RX FIFO? */
+ if ((reg & 0xf0) == 0xf0) {
+ hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
+ hw_ep->is_shared_fifo = true;
+ continue;
+ } else {
+ hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
+ hw_ep->is_shared_fifo = false;
+ }
+
+ /* FIXME set up hw_ep->{rx,tx}_double_buffered */
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* pick an RX/TX endpoint for bulk */
+ if (hw_ep->max_packet_sz_tx < 512
+ || hw_ep->max_packet_sz_rx < 512)
+ continue;
+
+ /* REVISIT: this algorithm is lazy; we should at least
+ * try to pick a double buffered endpoint.
+ */
+ if (musb->bulk_ep)
+ continue;
+ musb->bulk_ep = hw_ep;
+#endif
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+#endif
+
+ return 0;
+}
+
+enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
+
+/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+static int __init musb_core_init(u16 musb_type, struct musb *musb)
+{
+#ifdef MUSB_AHB_ID
+ u32 data;
+#endif
+ u8 reg;
+ char *type;
+ u16 hwvers, rev_major, rev_minor;
+ char aInfo[78], aRevision[32], aDate[12];
+ void __iomem *mbase = musb->mregs;
+ int status = 0;
+ int i;
+
+ /* log core options (read using indexed model) */
+ musb_ep_select(mbase, 0);
+ reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+
+ strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
+ if (reg & MUSB_CONFIGDATA_DYNFIFO)
+ strcat(aInfo, ", dyn FIFOs");
+ if (reg & MUSB_CONFIGDATA_MPRXE) {
+ strcat(aInfo, ", bulk combine");
+#ifdef C_MP_RX
+ musb->bulk_combine = true;
+#else
+ strcat(aInfo, " (X)"); /* no driver support */
+#endif
+ }
+ if (reg & MUSB_CONFIGDATA_MPTXE) {
+ strcat(aInfo, ", bulk split");
+#ifdef C_MP_TX
+ musb->bulk_split = true;
+#else
+ strcat(aInfo, " (X)"); /* no driver support */
+#endif
+ }
+ if (reg & MUSB_CONFIGDATA_HBRXE) {
+ strcat(aInfo, ", HB-ISO Rx");
+ strcat(aInfo, " (X)"); /* no driver support */
+ }
+ if (reg & MUSB_CONFIGDATA_HBTXE) {
+ strcat(aInfo, ", HB-ISO Tx");
+ strcat(aInfo, " (X)"); /* no driver support */
+ }
+ if (reg & MUSB_CONFIGDATA_SOFTCONE)
+ strcat(aInfo, ", SoftConn");
+
+ printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
+ musb_driver_name, reg, aInfo);
+
+#ifdef MUSB_AHB_ID
+ data = musb_readl(mbase, 0x404);
+ sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
+ (data >> 16) & 0xff, (data >> 24) & 0xff);
+ /* FIXME ID2 and ID3 are unused */
+ data = musb_readl(mbase, 0x408);
+ printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
+ data = musb_readl(mbase, 0x40c);
+ printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
+ reg = musb_readb(mbase, 0x400);
+ musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
+#else
+ aDate[0] = 0;
+#endif
+ if (MUSB_CONTROLLER_MHDRC == musb_type) {
+ musb->is_multipoint = 1;
+ type = "M";
+ } else {
+ musb->is_multipoint = 0;
+ type = "";
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
+ printk(KERN_ERR
+ "%s: kernel must blacklist external hubs\n",
+ musb_driver_name);
+#endif
+#endif
+ }
+
+ /* log release info */
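+ /* HWVERS, as decoded below: bits 14:10 are the major revision,
+ * bits 9:0 the minor revision, bit 15 marks release candidates
+ */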
+ hwvers = musb_readw(mbase, MUSB_HWVERS);
+ rev_major = (hwvers >> 10) & 0x1f;
+ rev_minor = hwvers & 0x3ff;
+ snprintf(aRevision, 32, "%d.%d%s", rev_major,
+ rev_minor, (hwvers & 0x8000) ? "RC" : "");
+ printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
+ musb_driver_name, type, aRevision, aDate);
+
+ /* configure ep0 */
+ musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+
+ /* discover endpoint configuration */
+ musb->nr_endpoints = 1;
+ musb->epmask = 1;
+
+ if (reg & MUSB_CONFIGDATA_DYNFIFO) {
+ if (musb->config->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else {
+ ERR("reconfigure software for Dynamic FIFOs\n");
+ status = -ENODEV;
+ }
+ } else {
+ if (!musb->config->dyn_fifo)
+ status = ep_config_from_hw(musb);
+ else {
+ ERR("reconfigure software for static FIFOs\n");
+ return -ENODEV;
+ }
+ }
+
+ if (status < 0)
+ return status;
+
+ /* finish init, and print endpoint config */
+ for (i = 0; i < musb->nr_endpoints; i++) {
+ struct musb_hw_ep *hw_ep = musb->endpoints + i;
+
+ hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
+#ifdef CONFIG_USB_TUSB6010
+ hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
+ hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
+ hw_ep->fifo_sync_va =
+ musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
+
+ if (i == 0)
+ hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
+ else
+ hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
+#endif
+
+ hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
+ hw_ep->rx_reinit = 1;
+ hw_ep->tx_reinit = 1;
+#endif
+
+ if (hw_ep->max_packet_sz_tx) {
+ printk(KERN_DEBUG
+ "%s: hw_ep %d%s, %smax %d\n",
+ musb_driver_name, i,
+ hw_ep->is_shared_fifo ? "shared" : "tx",
+ hw_ep->tx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->max_packet_sz_tx);
+ }
+ if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
+ printk(KERN_DEBUG
+ "%s: hw_ep %d%s, %smax %d\n",
+ musb_driver_name, i,
+ "rx",
+ hw_ep->rx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->max_packet_sz_rx);
+ }
+ if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
+ DBG(1, "hw_ep %d not configured\n", i);
+ }
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* REVISIT we sometimes get spurious IRQs on g_ep0
+ * not clear why...
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "spurious?\n");
+
+ return IRQ_HANDLED;
+}
+
+#else
+#define generic_interrupt NULL
+#endif
+
+/*
+ * handle all the irqs defined by the HDRC core; for now we expect that
+ * other irq sources (phy, dma, etc) have been handled first, musb->int_*
+ * values have been assigned, and the irq has already been acked.
+ *
+ * called in irq context with spinlock held, irqs blocked
+ */
+irqreturn_t musb_interrupt(struct musb *musb)
+{
+ irqreturn_t retval = IRQ_NONE;
+ u8 devctl, power;
+ int ep_num;
+ u32 reg;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ power = musb_readb(musb->mregs, MUSB_POWER);
+
+ DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
+ (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
+ musb->int_usb, musb->int_tx, musb->int_rx);
+
+ /* the core can interrupt us for multiple reasons; docs have
+ * a generic interrupt flowchart to follow
+ */
+ if (musb->int_usb & STAGE0_MASK)
+ retval |= musb_stage0_irq(musb, musb->int_usb,
+ devctl, power);
+
+ /* "stage 1" is handling endpoint irqs */
+
+ /* handle endpoint 0 first */
+ if (musb->int_tx & 1) {
+ if (devctl & MUSB_DEVCTL_HM)
+ retval |= musb_h_ep0_irq(musb);
+ else
+ retval |= musb_g_ep0_irq(musb);
+ }
+
+ /* RX on endpoints 1-15 */
+ reg = musb->int_rx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+ /* musb_ep_select(musb->mregs, ep_num); */
+ /* REVISIT just retval = ep->rx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_rx(musb, ep_num);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_rx(musb, ep_num);
+ }
+ }
+
+ reg >>= 1;
+ ep_num++;
+ }
+
+ /* TX on endpoints 1-15 */
+ reg = musb->int_tx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+ /* musb_ep_select(musb->mregs, ep_num); */
+ /* REVISIT just retval |= ep->tx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_tx(musb, ep_num);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_tx(musb, ep_num);
+ }
+ }
+ reg >>= 1;
+ ep_num++;
+ }
+
+ /* finish handling "global" interrupts after handling fifos */
+ if (musb->int_usb)
+ retval |= musb_stage2_irq(musb,
+ musb->int_usb, devctl, power);
+
+ return retval;
+}
+
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static int __initdata use_dma = 1;
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+
+void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+{
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ /* called with controller lock already held */
+
+ if (!epnum) {
+#ifndef CONFIG_USB_TUSB_OMAP_DMA
+ if (!is_cppi_enabled()) {
+ /* endpoint 0 */
+ if (devctl & MUSB_DEVCTL_HM)
+ musb_h_ep0_irq(musb);
+ else
+ musb_g_ep0_irq(musb);
+ }
+#endif
+ } else {
+ /* endpoints 1..15 */
+ if (transmit) {
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_tx(musb, epnum);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_tx(musb, epnum);
+ }
+ } else {
+ /* receive */
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_rx(musb, epnum);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_rx(musb, epnum);
+ }
+ }
+ }
+}
+
+#else
+#define use_dma 0
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t
+musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = sprintf(buf, "%s\n", otg_state_string(musb));
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static ssize_t
+musb_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ if (!strncmp(buf, "host", 4))
+ musb_platform_set_mode(musb, MUSB_HOST);
+ if (!strncmp(buf, "peripheral", 10))
+ musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+ if (!strncmp(buf, "otg", 3))
+ musb_platform_set_mode(musb, MUSB_OTG);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return n;
+}
+static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
+
+static ssize_t
+musb_vbus_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ unsigned long val;
+
+ if (sscanf(buf, "%lu", &val) < 1) {
+ printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->a_wait_bcon = val;
+ if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return n;
+}
+
+static ssize_t
+musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ unsigned long val;
+ int vbus;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ val = musb->a_wait_bcon;
+ vbus = musb_platform_get_vbus_status(musb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return sprintf(buf, "Vbus %s, timeout %lu\n",
+ vbus ? "on" : "off", val);
+}
+static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* Gadget drivers can't know whether a host is connected, so they
+ * can't decide when to start SRP; users can. This attribute lets
+ * userspace trigger SRP.
+ */
+static ssize_t
+musb_srp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned short srp;
+
+ if (sscanf(buf, "%hu", &srp) != 1
+ || (srp != 1)) {
+ printk(KERN_ERR "SRP: Value must be 1\n");
+ return -EINVAL;
+ }
+
+ if (srp == 1)
+ musb_g_wakeup(musb);
+
+ return n;
+}
+static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
+
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+
+#endif /* sysfs */
+
+/* Only used to provide driver mode change events */
+static void musb_irq_work(struct work_struct *data)
+{
+ struct musb *musb = container_of(data, struct musb, irq_work);
+ static int old_state;
+
+ if (musb->xceiv.state != old_state) {
+ old_state = musb->xceiv.state;
+ sysfs_notify(&musb->controller->kobj, NULL, "mode");
+ }
+}
+
+/* --------------------------------------------------------------------------
+ * Init support
+ */
+
+static struct musb *__init
+allocate_instance(struct device *dev,
+ struct musb_hdrc_config *config, void __iomem *mbase)
+{
+ struct musb *musb;
+ struct musb_hw_ep *ep;
+ int epnum;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ struct usb_hcd *hcd;
+
+ hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
+ if (!hcd)
+ return NULL;
+ /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+
+ musb = hcd_to_musb(hcd);
+ INIT_LIST_HEAD(&musb->control);
+ INIT_LIST_HEAD(&musb->in_bulk);
+ INIT_LIST_HEAD(&musb->out_bulk);
+
+ hcd->uses_new_polling = 1;
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+#else
+ musb = kzalloc(sizeof *musb, GFP_KERNEL);
+ if (!musb)
+ return NULL;
+ dev_set_drvdata(dev, musb);
+
+#endif
+
+ musb->mregs = mbase;
+ musb->ctrl_base = mbase;
+ musb->nIrq = -ENODEV;
+ musb->config = config;
+ for (epnum = 0, ep = musb->endpoints;
+ epnum < musb->config->num_eps;
+ epnum++, ep++) {
+
+ ep->musb = musb;
+ ep->epnum = epnum;
+ }
+
+ musb->controller = dev;
+ return musb;
+}
+
+static void musb_free(struct musb *musb)
+{
+ /* this has multiple entry modes. it handles fault cleanup after
+ * probe(), where things may be partially set up, as well as rmmod
+ * cleanup after everything's been de-activated.
+ */
+
+#ifdef CONFIG_SYSFS
+ device_remove_file(musb->controller, &dev_attr_mode);
+ device_remove_file(musb->controller, &dev_attr_vbus);
+#ifdef CONFIG_USB_MUSB_OTG
+ device_remove_file(musb->controller, &dev_attr_srp);
+#endif
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ musb_gadget_cleanup(musb);
+#endif
+
+ if (musb->nIrq >= 0) {
+ disable_irq_wake(musb->nIrq);
+ free_irq(musb->nIrq, musb);
+ }
+ if (is_dma_capable() && musb->dma_controller) {
+ struct dma_controller *c = musb->dma_controller;
+
+ (void) c->stop(c);
+ dma_controller_destroy(c);
+ }
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_platform_exit(musb);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
+ if (musb->clock) {
+ clk_disable(musb->clock);
+ clk_put(musb->clock);
+ }
+
+#ifdef CONFIG_USB_MUSB_OTG
+ put_device(musb->xceiv.dev);
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ usb_put_hcd(musb_to_hcd(musb));
+#else
+ kfree(musb);
+#endif
+}
+
+/*
+ * Perform generic per-controller initialization.
+ *
+ * @dev: the controller (already clocked, etc)
+ * @nIrq: irq
+ * @ctrl: virtual address of controller registers,
+ * not yet corrected for platform-specific offsets
+ */
+static int __init
+musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+{
+ int status;
+ struct musb *musb;
+ struct musb_hdrc_platform_data *plat = dev->platform_data;
+
+ /* The driver might handle more features than the board; OK.
+ * Fail when the board needs a feature that's not enabled.
+ */
+ if (!plat) {
+ dev_dbg(dev, "no platform_data?\n");
+ return -ENODEV;
+ }
+ switch (plat->mode) {
+ case MUSB_HOST:
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ break;
+#else
+ goto bad_config;
+#endif
+ case MUSB_PERIPHERAL:
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ break;
+#else
+ goto bad_config;
+#endif
+ case MUSB_OTG:
+#ifdef CONFIG_USB_MUSB_OTG
+ break;
+#else
+bad_config:
+#endif
+ default:
+ dev_err(dev, "incompatible Kconfig role setting\n");
+ return -EINVAL;
+ }
+
+ /* allocate */
+ musb = allocate_instance(dev, plat->config, ctrl);
+ if (!musb)
+ return -ENOMEM;
+
+ spin_lock_init(&musb->lock);
+ musb->board_mode = plat->mode;
+ musb->board_set_power = plat->set_power;
+ musb->set_clock = plat->set_clock;
+ musb->min_power = plat->min_power;
+
+ /* Clock usage is chip-specific ... functional clock (DaVinci,
+ * OMAP2430), or PHY ref (some TUSB6010 boards). All this core
+ * code does is make sure a clock handle is available; platform
+ * code manages it during start/stop and suspend/resume.
+ */
+ if (plat->clock) {
+ musb->clock = clk_get(dev, plat->clock);
+ if (IS_ERR(musb->clock)) {
+ status = PTR_ERR(musb->clock);
+ musb->clock = NULL;
+ goto fail;
+ }
+ }
+
+ /* assume vbus is off */
+
+ /* platform adjusts musb->mregs and musb->isr if needed,
+ * and activates clocks
+ */
+ musb->isr = generic_interrupt;
+ status = musb_platform_init(musb);
+
+ if (status < 0)
+ goto fail;
+ if (!musb->isr) {
+ status = -ENODEV;
+ goto fail2;
+ }
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ if (use_dma && dev->dma_mask) {
+ struct dma_controller *c;
+
+ c = dma_controller_create(musb, musb->mregs);
+ musb->dma_controller = c;
+ if (c)
+ (void) c->start(c);
+ }
+#endif
+ /* ideally this would be abstracted in platform setup */
+ if (!is_dma_capable() || !musb->dma_controller)
+ dev->dma_mask = NULL;
+
+ /* be sure interrupts are disabled before connecting ISR */
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+
+ /* setup musb parts of the core (especially endpoints) */
+ status = musb_core_init(plat->config->multipoint
+ ? MUSB_CONTROLLER_MHDRC
+ : MUSB_CONTROLLER_HDRC, musb);
+ if (status < 0)
+ goto fail2;
+
+ /* Init IRQ workqueue before request_irq */
+ INIT_WORK(&musb->irq_work, musb_irq_work);
+
+ /* attach to the IRQ */
+ if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
+ dev_err(dev, "request_irq %d failed!\n", nIrq);
+ status = -ENODEV;
+ goto fail2;
+ }
+ musb->nIrq = nIrq;
+/* FIXME this handles wakeup irqs wrong */
+ if (enable_irq_wake(nIrq) == 0)
+ device_init_wakeup(dev, 1);
+
+ pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
+ musb_driver_name,
+ ({char *s;
+ switch (musb->board_mode) {
+ case MUSB_HOST: s = "Host"; break;
+ case MUSB_PERIPHERAL: s = "Peripheral"; break;
+ default: s = "OTG"; break;
+ }; s; }),
+ ctrl,
+ (is_dma_capable() && musb->dma_controller)
+ ? "DMA" : "PIO",
+ musb->nIrq);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* host side needs more setup, except for no-host modes */
+ if (musb->board_mode != MUSB_PERIPHERAL) {
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+
+ if (musb->board_mode == MUSB_OTG)
+ hcd->self.otg_port = 1;
+ musb->xceiv.host = &hcd->self;
+ hcd->power_budget = 2 * (plat->power ? : 250);
+ }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+ /* For the host-only role, we can activate right away.
+ * (We expect the ID pin to be forcibly grounded!!)
+ * Otherwise, wait till the gadget driver hooks up.
+ */
+ if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+
+ status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+
+ DBG(1, "%s mode, status %d, devctl %02x %c\n",
+ "HOST", status,
+ musb_readb(musb->mregs, MUSB_DEVCTL),
+ (musb_readb(musb->mregs, MUSB_DEVCTL)
+ & MUSB_DEVCTL_BDEVICE
+ ? 'B' : 'A'));
+
+ } else /* peripheral is enabled */ {
+ MUSB_DEV_MODE(musb);
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+
+ status = musb_gadget_setup(musb);
+
+ DBG(1, "%s mode, status %d, dev%02x\n",
+ is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
+ status,
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+
+ }
+
+ if (status == 0)
+ musb_debug_create("driver/musb_hdrc", musb);
+ else {
+fail:
+ if (musb->clock)
+ clk_put(musb->clock);
+ device_init_wakeup(dev, 0);
+ musb_free(musb);
+ return status;
+ }
+
+#ifdef CONFIG_SYSFS
+ status = device_create_file(dev, &dev_attr_mode);
+ status = device_create_file(dev, &dev_attr_vbus);
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ status = device_create_file(dev, &dev_attr_srp);
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+ status = 0;
+#endif
+
+ return status;
+
+fail2:
+ musb_platform_exit(musb);
+ goto fail;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static u64 *orig_dma_mask;
+#endif
+
+static int __init musb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq = platform_get_irq(pdev, 0);
+ struct resource *iomem;
+ void __iomem *base;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem || irq == 0)
+ return -ENODEV;
+
+ base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+ if (!base) {
+ dev_err(dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ /* clobbered by use_dma=n */
+ orig_dma_mask = dev->dma_mask;
+#endif
+ return musb_init_controller(dev, irq, base);
+}
+
+static int __devexit musb_remove(struct platform_device *pdev)
+{
+ struct musb *musb = dev_to_musb(&pdev->dev);
+ void __iomem *ctrl_base = musb->ctrl_base;
+
+ /* this gets called on rmmod.
+ * - Host mode: host may still be active
+ * - Peripheral mode: peripheral is deactivated (or never-activated)
+ * - OTG mode: both roles are deactivated (or never-activated)
+ */
+ musb_shutdown(pdev);
+ musb_debug_delete("driver/musb_hdrc", musb);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (musb->board_mode == MUSB_HOST)
+ usb_remove_hcd(musb_to_hcd(musb));
+#endif
+ musb_free(musb);
+ iounmap(ctrl_base);
+ device_init_wakeup(&pdev->dev, 0);
+#ifndef CONFIG_MUSB_PIO_ONLY
+ pdev->dev.dma_mask = orig_dma_mask;
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ unsigned long flags;
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ if (!musb->clock)
+ return 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (is_peripheral_active(musb)) {
+ /* FIXME force disconnect unless we know USB will wake
+ * the system up quickly enough to respond ...
+ */
+ } else if (is_host_active(musb)) {
+ /* we know all the children are suspended; sometimes
+ * they will even be wakeup-enabled.
+ */
+ }
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ else
+ clk_disable(musb->clock);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static int musb_resume(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ if (!musb->clock)
+ return 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ else
+ clk_enable(musb->clock);
+
+ /* for static cmos like DaVinci, register values were preserved
+ * unless for some reason the whole soc powered down and we're
+ * not treating that as a whole-system restart (e.g. swsusp)
+ */
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+#else
+#define musb_suspend NULL
+#define musb_resume NULL
+#endif
+
+static struct platform_driver musb_driver = {
+ .driver = {
+ .name = (char *)musb_driver_name,
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(musb_remove),
+ .shutdown = musb_shutdown,
+ .suspend = musb_suspend,
+ .resume = musb_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init musb_init(void)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (usb_disabled())
+ return 0;
+#endif
+
+ pr_info("%s: version " MUSB_VERSION ", "
+#ifdef CONFIG_MUSB_PIO_ONLY
+ "pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ "cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+ "musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ "tusb-omap-dma"
+#else
+ "?dma?"
+#endif
+ ", "
+#ifdef CONFIG_USB_MUSB_OTG
+ "otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+ "peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+ "host"
+#endif
+ ", debug=%d\n",
+ musb_driver_name, debug);
+ return platform_driver_probe(&musb_driver, musb_probe);
+}
+
+/* make us init after usbcore and before usb
+ * gadget and host-side drivers start to register
+ */
+subsys_initcall(musb_init);
+
+static void __exit musb_cleanup(void)
+{
+ platform_driver_unregister(&musb_driver);
+}
+module_exit(musb_cleanup);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
new file mode 100644
index 000000000000..eade46d81708
--- /dev/null
+++ b/drivers/usb/musb/musb_core.h
@@ -0,0 +1,507 @@
+/*
+ * MUSB OTG driver defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_CORE_H__
+#define __MUSB_CORE_H__
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/musb.h>
+
+struct musb;
+struct musb_hw_ep;
+struct musb_ep;
+
+
+#include "musb_debug.h"
+#include "musb_dma.h"
+
+#include "musb_io.h"
+#include "musb_regs.h"
+
+#include "musb_gadget.h"
+#include "../core/hcd.h"
+#include "musb_host.h"
+
+
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST)
+#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL)
+#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG)
+
+/* NOTE: otg and peripheral-only state machines start at B_IDLE.
+ * OTG or host-only go to A_IDLE when ID is sensed.
+ */
+#define is_peripheral_active(m) (!(m)->is_host)
+#define is_host_active(m) ((m)->is_host)
+
+#else
+#define is_peripheral_enabled(musb) is_peripheral_capable()
+#define is_host_enabled(musb) is_host_capable()
+#define is_otg_enabled(musb) 0
+
+#define is_peripheral_active(musb) is_peripheral_capable()
+#define is_host_active(musb) is_host_capable()
+#endif
+
+#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
+/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always
+ * override that Kconfig choice selection (often USB_GADGET_DUMMY_HCD).
+ */
+#ifndef CONFIG_USB_GADGET_MUSB_HDRC
+#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
+#endif
+#endif /* need MUSB gadget selection */
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/fs.h>
+#define MUSB_CONFIG_PROC_FS
+#endif
+
+/****************************** PERIPHERAL ROLE *****************************/
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+#define is_peripheral_capable() (1)
+
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_wakeup(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+
+#else
+
+#define is_peripheral_capable() (0)
+
+static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_g_reset(struct musb *m) {}
+static inline void musb_g_suspend(struct musb *m) {}
+static inline void musb_g_resume(struct musb *m) {}
+static inline void musb_g_wakeup(struct musb *m) {}
+static inline void musb_g_disconnect(struct musb *m) {}
+
+#endif
+
+/****************************** HOST ROLE ***********************************/
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+#define is_host_capable() (1)
+
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+
+#else
+
+#define is_host_capable() (0)
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_host_tx(struct musb *m, u8 e) {}
+static inline void musb_host_rx(struct musb *m, u8 e) {}
+
+#endif
+
+
+/****************************** CONSTANTS ********************************/
+
+#ifndef MUSB_C_NUM_EPS
+#define MUSB_C_NUM_EPS ((u8)16)
+#endif
+
+#ifndef MUSB_MAX_END0_PACKET
+#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
+#endif
+
+/* host side ep0 states */
+enum musb_h_ep0_state {
+ MUSB_EP0_IDLE,
+ MUSB_EP0_START, /* expect ack of setup */
+ MUSB_EP0_IN, /* expect IN DATA */
+ MUSB_EP0_OUT, /* expect ack of OUT DATA */
+ MUSB_EP0_STATUS, /* expect ack of STATUS */
+} __attribute__ ((packed));
+
+/* peripheral side ep0 states */
+enum musb_g_ep0_state {
+ MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */
+ MUSB_EP0_STAGE_TX, /* IN data */
+ MUSB_EP0_STAGE_RX, /* OUT data */
+ MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
+ MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */
+ MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
+} __attribute__ ((packed));
+
+/* OTG protocol constants */
+#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
+#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */
+#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */
+
+/*************************** REGISTER ACCESS ********************************/
+
+/* Endpoint registers (other than dynfifo setup) can be accessed either
+ * directly with the "flat" model, or after setting up an index register.
+ */
+
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
+ || defined(CONFIG_ARCH_OMAP3430)
+/* REVISIT indexed access seemed to
+ * misbehave (on DaVinci) for at least peripheral IN ...
+ */
+#define MUSB_FLAT_REG
+#endif
+
+/* TUSB mapping: "flat" plus ep0 special cases */
+#if defined(CONFIG_USB_TUSB6010)
+#define musb_ep_select(_mbase, _epnum) \
+ musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
+
+/* "flat" mapping: each endpoint has its own i/o address */
+#elif defined(MUSB_FLAT_REG)
+#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
+#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET
+
+/* "indexed" mapping: INDEX register controls register bank select */
+#else
+#define musb_ep_select(_mbase, _epnum) \
+ musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
+#endif
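+
+/* Usage sketch (the pattern the host and gadget code in this patch
+ * follow): select the endpoint, then access its registers, e.g.
+ *
+ *	musb_ep_select(mbase, epnum);
+ *	csr = musb_readw(musb->endpoints[epnum].regs, MUSB_TXCSR);
+ *
+ * With the "flat" model, musb_ep_select() compiles to nothing since
+ * each endpoint already has its own register addresses.
+ */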
+
+/****************************** FUNCTIONS ********************************/
+
+#define MUSB_HST_MODE(_musb)\
+ { (_musb)->is_host = true; }
+#define MUSB_DEV_MODE(_musb) \
+ { (_musb)->is_host = false; }
+
+#define test_devctl_hst_mode(_x) \
+ (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
+
+#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
+
+/******************************** TYPES *************************************/
+
+/*
+ * struct musb_hw_ep - endpoint hardware (bidirectional)
+ *
+ * Ordered slightly for better cacheline locality.
+ */
+struct musb_hw_ep {
+ struct musb *musb;
+ void __iomem *fifo;
+ void __iomem *regs;
+
+#ifdef CONFIG_USB_TUSB6010
+ void __iomem *conf;
+#endif
+
+ /* index in musb->endpoints[] */
+ u8 epnum;
+
+ /* hardware configuration, possibly dynamic */
+ bool is_shared_fifo;
+ bool tx_double_buffered;
+ bool rx_double_buffered;
+ u16 max_packet_sz_tx;
+ u16 max_packet_sz_rx;
+
+ struct dma_channel *tx_channel;
+ struct dma_channel *rx_channel;
+
+#ifdef CONFIG_USB_TUSB6010
+ /* TUSB has "asynchronous" and "synchronous" dma modes */
+ dma_addr_t fifo_async;
+ dma_addr_t fifo_sync;
+ void __iomem *fifo_sync_va;
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ void __iomem *target_regs;
+
+ /* currently scheduled peripheral endpoint */
+ struct musb_qh *in_qh;
+ struct musb_qh *out_qh;
+
+ u8 rx_reinit;
+ u8 tx_reinit;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ /* peripheral side */
+ struct musb_ep ep_in; /* TX */
+ struct musb_ep ep_out; /* RX */
+#endif
+};
+
+static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ return next_request(&hw_ep->ep_in);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ return next_request(&hw_ep->ep_out);
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * struct musb - Driver instance data.
+ */
+struct musb {
+ /* device lock */
+ spinlock_t lock;
+ struct clk *clock;
+ irqreturn_t (*isr)(int, void *);
+ struct work_struct irq_work;
+
+/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
+#define MUSB_PORT_STAT_RESUME (1 << 31)
+
+ u32 port1_status;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ unsigned long rh_timer;
+
+ enum musb_h_ep0_state ep0_stage;
+
+ /* bulk traffic normally dedicates endpoint hardware, and each
+ * direction has its own ring of host side endpoints.
+ * we try to progress the transfer at the head of each endpoint's
+ * queue until it completes or NAKs too much; then we try the next
+ * endpoint.
+ */
+ struct musb_hw_ep *bulk_ep;
+
+ struct list_head control; /* of musb_qh */
+ struct list_head in_bulk; /* of musb_qh */
+ struct list_head out_bulk; /* of musb_qh */
+ struct musb_qh *periodic[32]; /* tree of interrupt+iso */
+#endif
+
+ /* called with IRQs blocked; ON/nonzero implies starting a session,
+ * and waiting at least a_wait_vrise_tmout.
+ */
+ void (*board_set_vbus)(struct musb *, int is_on);
+
+ struct dma_controller *dma_controller;
+
+ struct device *controller;
+ void __iomem *ctrl_base;
+ void __iomem *mregs;
+
+#ifdef CONFIG_USB_TUSB6010
+ dma_addr_t async;
+ dma_addr_t sync;
+ void __iomem *sync_va;
+#endif
+
+ /* passed down from chip/board specific irq handlers */
+ u8 int_usb;
+ u16 int_rx;
+ u16 int_tx;
+
+ struct otg_transceiver xceiv;
+
+ int nIrq;
+
+ struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
+#define control_ep endpoints
+
+#define VBUSERR_RETRY_COUNT 3
+ u16 vbuserr_retry;
+ u16 epmask;
+ u8 nr_endpoints;
+
+ u8 board_mode; /* enum musb_mode */
+ int (*board_set_power)(int state);
+
+ int (*set_clock)(struct clk *clk, int is_active);
+
+ u8 min_power; /* vbus for periph, in mA/2 */
+
+ bool is_host;
+
+ int a_wait_bcon; /* VBUS timeout in msecs */
+ unsigned long idle_timeout; /* Next timeout in jiffies */
+
+ /* active means connected and not suspended */
+ unsigned is_active:1;
+
+ unsigned is_multipoint:1;
+ unsigned ignore_disconnect:1; /* during bus resets */
+
+#ifdef C_MP_TX
+ unsigned bulk_split:1;
+#define can_bulk_split(musb,type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
+#else
+#define can_bulk_split(musb, type) 0
+#endif
+
+#ifdef C_MP_RX
+ unsigned bulk_combine:1;
+#define can_bulk_combine(musb,type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
+#else
+#define can_bulk_combine(musb, type) 0
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ /* is_suspended means USB B_PERIPHERAL suspend */
+ unsigned is_suspended:1;
+
+ /* may_wakeup means remote wakeup is enabled */
+ unsigned may_wakeup:1;
+
+ /* is_self_powered is reported in device status and the
+ * config descriptor. is_bus_powered means B_PERIPHERAL
+ * draws some VBUS current; both can be true.
+ */
+ unsigned is_self_powered:1;
+ unsigned is_bus_powered:1;
+
+ unsigned set_address:1;
+ unsigned test_mode:1;
+ unsigned softconnect:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u16 ackpend; /* ep0 */
+ enum musb_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver; /* its driver */
+#endif
+
+ struct musb_hdrc_config *config;
+
+#ifdef MUSB_CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry;
+#endif
+};
+
+static inline void musb_set_vbus(struct musb *musb, int is_on)
+{
+ musb->board_set_vbus(musb, is_on);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static inline struct musb *gadget_to_musb(struct usb_gadget *g)
+{
+ return container_of(g, struct musb, g);
+}
+#endif
+
+
+/***************************** Glue it together *****************************/
+
+extern const char musb_driver_name[];
+
+extern void musb_start(struct musb *musb);
+extern void musb_stop(struct musb *musb);
+
+extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
+extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+
+extern void musb_load_testpacket(struct musb *);
+
+extern irqreturn_t musb_interrupt(struct musb *);
+
+extern void musb_platform_enable(struct musb *musb);
+extern void musb_platform_disable(struct musb *musb);
+
+extern void musb_hnp_stop(struct musb *musb);
+
+extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+
+#if defined(CONFIG_USB_TUSB6010) || \
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
+#else
+#define musb_platform_try_idle(x, y) do {} while (0)
+#endif
+
+#ifdef CONFIG_USB_TUSB6010
+extern int musb_platform_get_vbus_status(struct musb *musb);
+#else
+#define musb_platform_get_vbus_status(x) 0
+#endif
+
+extern int __init musb_platform_init(struct musb *musb);
+extern int musb_platform_exit(struct musb *musb);
+
+/*-------------------------- ProcFS definitions ---------------------*/
+
+struct proc_dir_entry;
+
+#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS)
+extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data);
+extern void musb_debug_delete(char *name, struct musb *data);
+
+#else
+static inline struct proc_dir_entry *
+musb_debug_create(char *name, struct musb *data)
+{
+ return NULL;
+}
+static inline void musb_debug_delete(char *name, struct musb *data)
+{
+}
+#endif
+
+#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
new file mode 100644
index 000000000000..3bdb311e820d
--- /dev/null
+++ b/drivers/usb/musb/musb_debug.h
@@ -0,0 +1,66 @@
+/*
+ * MUSB OTG driver debug defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_DEBUG_H__
+#define __MUSB_LINUX_DEBUG_H__
+
+#define yprintk(facility, format, args...) \
+ do { printk(facility "%s %d: " format , \
+ __func__, __LINE__ , ## args); } while (0)
+#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
+#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
+#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+
+#define xprintk(level, facility, format, args...) do { \
+ if (_dbg_level(level)) { \
+ printk(facility "%s %d: " format , \
+ __func__, __LINE__ , ## args); \
+ } } while (0)
+
+#if MUSB_DEBUG > 0
+extern unsigned debug;
+#else
+#define debug 0
+#endif
+
+static inline int _dbg_level(unsigned l)
+{
+ return debug >= l;
+}
+
+#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
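+
+/* Example: DBG(3, "ep%d rxcsr %04x\n", epnum, csr) prints only when
+ * debug >= 3 (and only when MUSB_DEBUG is enabled at build time).
+ */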
+
+extern const char *otg_state_string(struct musb *);
+
+#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
new file mode 100644
index 000000000000..0a2c4e3602c1
--- /dev/null
+++ b/drivers/usb/musb/musb_dma.h
@@ -0,0 +1,172 @@
+/*
+ * MUSB OTG driver DMA controller abstraction
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_DMA_H__
+#define __MUSB_DMA_H__
+
+struct musb_hw_ep;
+
+/*
+ * DMA Controller Abstraction
+ *
+ * DMA Controllers are abstracted to allow use of a variety of different
+ * implementations of DMA, as allowed by the Inventra USB cores. On the
+ * host side, usbcore sets up the DMA mappings and flushes caches; on the
+ * peripheral side, the gadget controller driver does. Responsibilities
+ * of a DMA controller driver include:
+ *
+ * - Handling the details of moving multiple USB packets
+ * in cooperation with the Inventra USB core, including especially
+ * the correct RX side treatment of short packets and buffer-full
+ * states (both of which terminate transfers).
+ *
+ * - Knowing the correlation between dma channels and the
+ * Inventra core's local endpoint resources and data direction.
+ *
+ * - Maintaining a list of allocated/available channels.
+ *
+ * - Updating channel status on interrupts,
+ * whether shared with the Inventra core or separate.
+ */
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+#define is_dma_capable() (1)
+#else
+#define is_dma_capable() (0)
+#endif
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+#define is_cppi_enabled() 1
+#else
+#define is_cppi_enabled() 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap() 1
+#else
+#define tusb_dma_omap() 0
+#endif
+
+/*
+ * DMA channel status ... updated by the dma controller driver whenever that
+ * status changes, and protected by the overall controller spinlock.
+ */
+enum dma_channel_status {
+ /* unallocated */
+ MUSB_DMA_STATUS_UNKNOWN,
+ /* allocated ... but not busy, no errors */
+ MUSB_DMA_STATUS_FREE,
+ /* busy ... transactions are active */
+ MUSB_DMA_STATUS_BUSY,
+ /* transaction(s) aborted due to ... dma or memory bus error */
+ MUSB_DMA_STATUS_BUS_ABORT,
+ /* transaction(s) aborted due to ... core error or USB fault */
+ MUSB_DMA_STATUS_CORE_ABORT
+};
+
+struct dma_controller;
+
+/**
+ * struct dma_channel - A DMA channel.
+ * @private_data: channel-private data
+ * @max_len: the maximum number of bytes the channel can move in one
+ * transaction (typically representing many USB maximum-sized packets)
+ * @actual_len: how many bytes have been transferred
+ * @status: current channel status (updated e.g. on interrupt)
+ * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
+ *
+ * channels are associated with an endpoint for the duration of at least
+ * one usb transfer.
+ */
+struct dma_channel {
+ void *private_data;
+ /* FIXME not void* private_data, but a dma_controller * */
+ size_t max_len;
+ size_t actual_len;
+ enum dma_channel_status status;
+ bool desired_mode;
+};
+
+/*
+ * dma_channel_status - return status of dma channel
+ * @c: the channel
+ *
+ * Returns the software's view of the channel status. If that status is BUSY
+ * then it's possible that the hardware has completed (or aborted) a transfer,
+ * so the driver needs to update that status.
+ */
+static inline enum dma_channel_status
+dma_channel_status(struct dma_channel *c)
+{
+ return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/**
+ * struct dma_controller - A DMA Controller.
+ * @start: call this to start a DMA controller;
+ * return 0 on success, else negative errno
+ * @stop: call this to stop a DMA controller
+ * return 0 on success, else negative errno
+ * @channel_alloc: call this to allocate a DMA channel
+ * @channel_release: call this to release a DMA channel
+ * @channel_abort: call this to abort a pending DMA transaction,
+ * returning it to FREE (but allocated) state
+ *
+ * Controllers manage dma channels.
+ */
+struct dma_controller {
+ int (*start)(struct dma_controller *);
+ int (*stop)(struct dma_controller *);
+ struct dma_channel *(*channel_alloc)(struct dma_controller *,
+ struct musb_hw_ep *, u8 is_tx);
+ void (*channel_release)(struct dma_channel *);
+ int (*channel_program)(struct dma_channel *channel,
+ u16 maxpacket, u8 mode,
+ dma_addr_t dma_addr,
+ u32 length);
+ int (*channel_abort)(struct dma_channel *);
+};
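+
+/* A rough sketch of how the glue code (see txstate()/rxstate() in
+ * musb_gadget.c) drives these hooks:
+ *
+ *	channel = c->channel_alloc(c, hw_ep, is_tx);
+ *	if (channel && c->channel_program(channel, maxpacket, mode,
+ *				request->dma, length))
+ *		... wait for the completion irq / musb_dma_completion() ...
+ *	else
+ *		... fall back to PIO ...
+ */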
+
+/* called after channel_program(), may indicate a fault */
+extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+
+
+extern struct dma_controller *__init
+dma_controller_create(struct musb *, void __iomem *);
+
+extern void dma_controller_destroy(struct dma_controller *);
+
+#endif /* __MUSB_DMA_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
new file mode 100644
index 000000000000..d6a802c224fa
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.c
@@ -0,0 +1,2031 @@
+/*
+ * MUSB OTG driver peripheral support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+#include <linux/dma-mapping.h>
+
+#include "musb_core.h"
+
+
+/* MUSB PERIPHERAL status 3-mar-2006:
+ *
+ * - EP0 seems solid. It passes both USBCV and usbtest control cases.
+ * Minor glitches:
+ *
+ * + remote wakeup to Linux hosts works, but we saw USBCV
+ * failures in one test run (operator error?)
+ * + endpoint halt tests -- in both usbtest and usbcv -- seem
+ * to break when dma is enabled ... is something wrongly
+ * clearing SENDSTALL?
+ *
+ * - Mass storage behaved ok when last tested. Network traffic patterns
+ * (with lots of short transfers etc) need retesting; they turn up the
+ * worst cases of the DMA, since short packets are typical but are not
+ * required.
+ *
+ * - TX/IN
+ * + both pio and dma behave well with network and g_zero tests
+ * + no cppi throughput issues other than no-hw-queueing
+ * + failed with FLAT_REG (DaVinci)
+ * + seems to behave with double buffering, PIO -and- CPPI
+ * + with gadgetfs + AIO, requests got lost?
+ *
+ * - RX/OUT
+ * + both pio and dma behave well with network and g_zero tests
+ * + dma is slow in typical case (short_not_ok is clear)
+ * + double buffering ok with PIO
+ * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
+ * + request lossage observed with gadgetfs
+ *
+ * - ISO not tested ... might work, but only weakly isochronous
+ *
+ * - Gadget driver disabling of softconnect during bind() is ignored, so
+ * drivers can't hold off host requests until userspace is ready.
+ * (Workaround: they can turn it off later.)
+ *
+ * - PORTABILITY (assumes PIO works):
+ * + DaVinci, basically works with cppi dma
+ * + OMAP 2430, ditto with mentor dma
+ * + TUSB 6010, platform-specific dma in the works
+ */
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Immediately complete a request.
+ *
+ * @param ep the endpoint that owns the request
+ * @param request the request to complete
+ * @param status the status to complete the request with
+ * Context: controller locked, IRQs blocked.
+ */
+void musb_g_giveback(
+ struct musb_ep *ep,
+ struct usb_request *request,
+ int status)
+__releases(ep->musb->lock)
+__acquires(ep->musb->lock)
+{
+ struct musb_request *req;
+ struct musb *musb;
+ int busy = ep->busy;
+
+ req = to_musb_request(request);
+
+ list_del(&request->list);
+ if (req->request.status == -EINPROGRESS)
+ req->request.status = status;
+ musb = req->musb;
+
+ ep->busy = 1;
+ spin_unlock(&musb->lock);
+ if (is_dma_capable()) {
+ if (req->mapped) {
+ dma_unmap_single(musb->controller,
+ req->request.dma,
+ req->request.length,
+ req->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ req->request.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ } else if (req->request.dma != DMA_ADDR_INVALID)
+ dma_sync_single_for_cpu(musb->controller,
+ req->request.dma,
+ req->request.length,
+ req->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ }
+ if (request->status == 0)
+ DBG(5, "%s done request %p, %d/%d\n",
+ ep->end_point.name, request,
+ req->request.actual, req->request.length);
+ else
+ DBG(2, "%s request %p, %d/%d fault %d\n",
+ ep->end_point.name, request,
+ req->request.actual, req->request.length,
+ request->status);
+ req->request.complete(&req->ep->end_point, &req->request);
+ spin_lock(&musb->lock);
+ ep->busy = busy;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Abort requests queued to an endpoint using the status. Synchronous.
+ * The caller has locked the controller, blocked IRQs, and selected this ep.
+ */
+static void nuke(struct musb_ep *ep, const int status)
+{
+ struct musb_request *req = NULL;
+ void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
+
+ ep->busy = 1;
+
+ if (is_dma_capable() && ep->dma) {
+ struct dma_controller *c = ep->musb->dma_controller;
+ int value;
+ if (ep->is_in) {
+ musb_writew(epio, MUSB_TXCSR,
+ 0 | MUSB_TXCSR_FLUSHFIFO);
+ musb_writew(epio, MUSB_TXCSR,
+ 0 | MUSB_TXCSR_FLUSHFIFO);
+ } else {
+ musb_writew(epio, MUSB_RXCSR,
+ 0 | MUSB_RXCSR_FLUSHFIFO);
+ musb_writew(epio, MUSB_RXCSR,
+ 0 | MUSB_RXCSR_FLUSHFIFO);
+ }
+
+ value = c->channel_abort(ep->dma);
+ DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
+ c->channel_release(ep->dma);
+ ep->dma = NULL;
+ }
+
+ while (!list_empty(&(ep->req_list))) {
+ req = container_of(ep->req_list.next, struct musb_request,
+ request.list);
+ musb_g_giveback(ep, &req->request, status);
+ }
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* Data transfers - pure PIO, pure DMA, or mixed mode */
+
+/*
+ * This assumes the separate CPPI engine is responding to DMA requests
+ * from the usb core ... sequenced a bit differently from mentor dma.
+ */
+
+static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
+{
+ if (can_bulk_split(musb, ep->type))
+ return ep->hw_ep->max_packet_sz_tx;
+ else
+ return ep->packet_sz;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral tx (IN) using Mentor DMA works as follows:
+ Only mode 0 is used for transfers <= wPktSize,
+ mode 1 is used for larger transfers,
+
+ One of the following happens:
+ - Host sends IN token which causes an endpoint interrupt
+ -> TxAvail
+ -> if DMA is currently busy, exit.
+ -> if queue is non-empty, txstate().
+
+ - Request is queued by the gadget driver.
+ -> if queue was previously empty, txstate()
+
+ txstate()
+ -> start
+ /\ -> setup DMA
+ | (data is transferred to the FIFO, then sent out when
+ | IN token(s) are received from the Host.)
+ | -> DMA interrupt on completion
+ | calls TxAvail.
+ | -> stop DMA, ~DmaEnab,
+ | -> set TxPktRdy for last short pkt or zlp
+ | -> Complete Request
+ | -> Continue next request (call txstate)
+ |___________________________________|
+
+ * Non-Mentor DMA engines can of course work differently, such as by
+ * upleveling from irq-per-packet to irq-per-buffer.
+ */
+
+#endif
+
+/*
+ * An endpoint is transmitting data. This can be called either from
+ * the IRQ routine or from ep.queue() to kickstart a request on an
+ * endpoint.
+ *
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void txstate(struct musb *musb, struct musb_request *req)
+{
+ u8 epnum = req->epnum;
+ struct musb_ep *musb_ep;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct usb_request *request;
+ u16 fifo_count = 0, csr;
+ int use_dma = 0;
+
+ musb_ep = req->ep;
+
+ /* we shouldn't get here while DMA is active ... but we do ... */
+ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
+ DBG(4, "dma pending...\n");
+ return;
+ }
+
+ /* read TXCSR before */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ request = &req->request;
+ fifo_count = min(max_ep_writesize(musb, musb_ep),
+ (int)(request->length - request->actual));
+
+ if (csr & MUSB_TXCSR_TXPKTRDY) {
+ DBG(5, "%s old packet still ready , txcsr %03x\n",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ if (csr & MUSB_TXCSR_P_SENDSTALL) {
+ DBG(5, "%s stalling, txcsr %03x\n",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+ epnum, musb_ep->packet_sz, fifo_count,
+ csr);
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+
+ use_dma = (request->dma != DMA_ADDR_INVALID);
+
+ /* MUSB_TXCSR_P_ISO is still set correctly */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ {
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min(request->length,
+ musb_ep->dma->max_len);
+ if (request_size <= musb_ep->packet_sz)
+ musb_ep->dma->desired_mode = 0;
+ else
+ musb_ep->dma->desired_mode = 1;
+
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ musb_ep->dma->desired_mode,
+ request->dma, request_size);
+ if (use_dma) {
+ if (musb_ep->dma->desired_mode == 0) {
+ /* ASSERT: DMAENAB is clear */
+ csr &= ~(MUSB_TXCSR_AUTOSET |
+ MUSB_TXCSR_DMAMODE);
+ csr |= (MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_MODE);
+ /* against programming guide */
+ } else
+ csr |= (MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_MODE);
+
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ }
+
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ /* program endpoint CSR first, then setup DMA */
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR,
+ (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
+ | csr);
+
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* NOTE host side sets DMAENAB later than this; both are
+ * OK since the transfer dma glue (between CPPI and Mentor
+ * fifos) just tells CPPI it could start. Data only moves
+ * to the USB TX fifo when both fifos are ready.
+ */
+
+ /* "mode" is irrelevant here; handle terminating ZLPs like
+ * PIO does, since the hardware RNDIS mode seems unreliable
+ * except for the last-packet-is-already-short case.
+ */
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ 0,
+ request->dma,
+ request->length);
+ if (!use_dma) {
+ c->channel_release(musb_ep->dma);
+ musb_ep->dma = NULL;
+ /* ASSERT: DMAENAB clear */
+ csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ /* invariant: request->buf is non-null */
+ }
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ request->zero,
+ request->dma,
+ request->length);
+#endif
+ }
+#endif
+
+ if (!use_dma) {
+ musb_write_fifo(musb_ep->hw_ep, fifo_count,
+ (u8 *) (request->buf + request->actual));
+ request->actual += fifo_count;
+ csr |= MUSB_TXCSR_TXPKTRDY;
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ /* host may already have the data when this message shows... */
+ DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+ musb_ep->end_point.name, use_dma ? "dma" : "pio",
+ request->actual, request->length,
+ musb_readw(epio, MUSB_TXCSR),
+ fifo_count,
+ musb_readw(epio, MUSB_TXMAXP));
+}
+
+/*
+ * FIFO state update (e.g. data ready).
+ * Called from IRQ, with controller locked.
+ */
+void musb_g_tx(struct musb *musb, u8 epnum)
+{
+ u16 csr;
+ struct usb_request *request;
+ u8 __iomem *mbase = musb->mregs;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+ request = next_request(musb_ep);
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+
+ dma = is_dma_capable() ? musb_ep->dma : NULL;
+ do {
+ /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+ * probably rates reporting as a host error
+ */
+ if (csr & MUSB_TXCSR_P_SENTSTALL) {
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~MUSB_TXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ }
+
+ if (request)
+ musb_g_giveback(musb_ep, request, -EPIPE);
+
+ break;
+ }
+
+ if (csr & MUSB_TXCSR_P_UNDERRUN) {
+ /* we NAKed, no big deal ... little reason to care */
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ DBG(20, "underrun on ep%d, req %p\n", epnum, request);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /* SHOULD NOT HAPPEN ... has with cppi though, after
+ * changing SENDSTALL (and other cases); harmless?
+ */
+ DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
+ break;
+ }
+
+ if (request) {
+ u8 is_dma = 0;
+
+ if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+ is_dma = 1;
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ request->actual += musb_ep->dma->actual_len;
+ DBG(4, "TXCSR%d %04x, dma off, "
+ "len %zu, req %p\n",
+ epnum, csr,
+ musb_ep->dma->actual_len,
+ request);
+ }
+
+ if (is_dma || request->actual == request->length) {
+
+ /* First, maybe a terminating short packet.
+ * Some DMA engines might handle this by
+ * themselves.
+ */
+ if ((request->zero
+ && request->length
+ && (request->length
+ % musb_ep->packet_sz)
+ == 0)
+#ifdef CONFIG_USB_INVENTRA_DMA
+ || (is_dma &&
+ ((!dma->desired_mode) ||
+ (request->actual &
+ (musb_ep->packet_sz - 1))))
+#endif
+ ) {
+ /* on dma completion, fifo may not
+ * be available yet ...
+ */
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ break;
+
+ DBG(4, "sending zero pkt\n");
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_MODE
+ | MUSB_TXCSR_TXPKTRDY);
+ request->zero = 0;
+ }
+
+ /* ... or if not, then complete it */
+ musb_g_giveback(musb_ep, request, 0);
+
+ /* kickstart next transfer if appropriate;
+ * the packet that just completed might not
+ * be transmitted for hours or days.
+ * REVISIT for double buffering...
+ * FIXME revisit for stalls too...
+ */
+ musb_ep_select(mbase, epnum);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ break;
+ request = musb_ep->desc
+ ? next_request(musb_ep)
+ : NULL;
+ if (!request) {
+ DBG(4, "%s idle now\n",
+ musb_ep->end_point.name);
+ break;
+ }
+ }
+
+ txstate(musb, to_musb_request(request));
+ }
+
+ } while (0);
+}
+
+/* ------------------------------------------------------------ */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral rx (OUT) using Mentor DMA works as follows:
+ - Only mode 0 is used.
+
+ - Request is queued by the gadget class driver.
+ -> if queue was previously empty, rxstate()
+
+ - Host sends OUT token which causes an endpoint interrupt
+ /\ -> RxReady
+ | -> if request queued, call rxstate
+ | /\ -> setup DMA
+ | | -> DMA interrupt on completion
+ | | -> RxReady
+ | | -> stop DMA
+ | | -> ack the read
+ | | -> if data recd = max expected
+ | | by the request, or host
+ | | sent a short packet,
+ | | complete the request,
+ | | and start the next one.
+ | |_____________________________________|
+ | else just wait for the host
+ | to send the next OUT token.
+ |__________________________________________________|
+
+ * Non-Mentor DMA engines can of course work differently.
+ */
+
+#endif
+
+/*
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void rxstate(struct musb *musb, struct musb_request *req)
+{
+ u16 csr = 0;
+ const u8 epnum = req->epnum;
+ struct usb_request *request = &req->request;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ u16 fifo_count = 0;
+ u16 len = musb_ep->packet_sz;
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+
+ if (is_cppi_enabled() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+
+ /* NOTE: CPPI won't actually stop advancing the DMA
+ * queue after short packet transfers; as a result, this will
+ * almost always run as IRQ-per-packet DMA so that faults are
+ * handled correctly.
+ */
+ if (c->channel_program(channel,
+ musb_ep->packet_sz,
+ !request->short_not_ok,
+ request->dma + request->actual,
+ request->length - request->actual)) {
+
+ /* make sure that if an rxpkt arrived after the irq,
+ * the cppi engine will be ready to take it as soon
+ * as DMA is enabled
+ */
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_DMAMODE);
+ csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ return;
+ }
+ }
+
+ if (csr & MUSB_RXCSR_RXPKTRDY) {
+ len = musb_readw(epio, MUSB_RXCOUNT);
+ if (request->actual < request->length) {
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c;
+ struct dma_channel *channel;
+ int use_dma = 0;
+
+ c = musb->dma_controller;
+ channel = musb_ep->dma;
+
+ /* We use DMA Req mode 0 in rx_csr, and the DMA controller operates in
+ * mode 0 only. So we do not get endpoint interrupts due to DMA
+ * completion; we only get interrupts from the DMA controller.
+ *
+ * We could operate in DMA mode 1 if we knew the size of the transfer
+ * in advance. For mass storage class, request->length = what the host
+ * sends, so that'd work. But for pretty much everything else,
+ * request->length is routinely more than what the host sends. For
+ * most of these gadgets, the end of the transfer is signified either
+ * by a short packet or by filling the last byte of the buffer.
+ * (Sending extra data in that last packet should trigger an overflow
+ * fault.) But in mode 1, we don't get a DMA completion interrupt for
+ * short packets.
+ *
+ * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+ * to get endpoint interrupt on every DMA req, but that didn't seem
+ * to work reliably.
+ *
+ * REVISIT an updated g_file_storage can set req->short_not_ok, which
+ * then becomes usable as a runtime "use mode 1" hint...
+ */
+
+ csr |= MUSB_RXCSR_DMAENAB;
+#ifdef USE_MODE1
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ /* csr |= MUSB_RXCSR_DMAMODE; */
+
+ /* this special sequence (enabling and then
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
+ */
+ musb_writew(epio, MUSB_RXCSR,
+ csr | MUSB_RXCSR_DMAMODE);
+#endif
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ if (request->actual < request->length) {
+ int transfer_size = 0;
+#ifdef USE_MODE1
+ transfer_size = min(request->length,
+ channel->max_len);
+#else
+ transfer_size = len;
+#endif
+ if (transfer_size <= musb_ep->packet_sz)
+ musb_ep->dma->desired_mode = 0;
+ else
+ musb_ep->dma->desired_mode = 1;
+
+ use_dma = c->channel_program(
+ channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ request->dma
+ + request->actual,
+ transfer_size);
+ }
+
+ if (use_dma)
+ return;
+ }
+#endif /* Mentor's DMA */
+
+ fifo_count = request->length - request->actual;
+ DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+ musb_ep->end_point.name,
+ len, fifo_count,
+ musb_ep->packet_sz);
+
+ fifo_count = min(len, fifo_count);
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+ if (tusb_dma_omap() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+ u32 dma_addr = request->dma + request->actual;
+ int ret;
+
+ ret = c->channel_program(channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ dma_addr,
+ fifo_count);
+ if (ret)
+ return;
+ }
+#endif
+
+ musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
+ (request->buf + request->actual));
+ request->actual += fifo_count;
+
+ /* REVISIT if we left anything in the fifo, flush
+ * it and report -EOVERFLOW
+ */
+
+ /* ack the read! */
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~MUSB_RXCSR_RXPKTRDY;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+
+ /* reached the end, or detected a short packet */
+ if (request->actual == request->length || len < musb_ep->packet_sz)
+ musb_g_giveback(musb_ep, request, 0);
+}
+
+/*
+ * Data ready for a request; called from IRQ
+ */
+void musb_g_rx(struct musb *musb, u8 epnum)
+{
+ u16 csr;
+ struct usb_request *request;
+ void __iomem *mbase = musb->mregs;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+
+ request = next_request(musb_ep);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ dma = is_dma_capable() ? musb_ep->dma : NULL;
+
+ DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+ csr, dma ? " (dma)" : "", request);
+
+ if (csr & MUSB_RXCSR_P_SENTSTALL) {
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ request->actual += musb_ep->dma->actual_len;
+ }
+
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~MUSB_RXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ if (request)
+ musb_g_giveback(musb_ep, request, -EPIPE);
+ goto done;
+ }
+
+ if (csr & MUSB_RXCSR_P_OVERRUN) {
+ /* csr |= MUSB_RXCSR_P_WZC_BITS; */
+ csr &= ~MUSB_RXCSR_P_OVERRUN;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
+ if (request && request->status == -EINPROGRESS)
+ request->status = -EOVERFLOW;
+ }
+ if (csr & MUSB_RXCSR_INCOMPRX) {
+ /* REVISIT not necessarily an error */
+ DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /* "should not happen"; likely RXPKTRDY pending for DMA */
+ DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
+ "%s busy, csr %04x\n",
+ musb_ep->end_point.name, csr);
+ goto done;
+ }
+
+ if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_P_WZC_BITS | csr);
+
+ request->actual += musb_ep->dma->actual_len;
+
+ DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
+ epnum, csr,
+ musb_readw(epio, MUSB_RXCSR),
+ musb_ep->dma->actual_len, request);
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
+ /* Autoclear doesn't clear RxPktRdy for short packets */
+ if ((dma->desired_mode == 0)
+ || (dma->actual_len
+ & (musb_ep->packet_sz - 1))) {
+ /* ack the read! */
+ csr &= ~MUSB_RXCSR_RXPKTRDY;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* incomplete, and not short? wait for next IN packet */
+ if ((request->actual < request->length)
+ && (musb_ep->dma->actual_len
+ == musb_ep->packet_sz))
+ goto done;
+#endif
+ musb_g_giveback(musb_ep, request, 0);
+
+ request = next_request(musb_ep);
+ if (!request)
+ goto done;
+
+ /* don't start more i/o till the stall clears */
+ musb_ep_select(mbase, epnum);
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_P_SENDSTALL)
+ goto done;
+ }
+
+
+ /* analyze request if the ep is hot */
+ if (request)
+ rxstate(musb, to_musb_request(request));
+ else
+ DBG(3, "packet waiting for %s%s request\n",
+ musb_ep->desc ? "" : "inactive ",
+ musb_ep->end_point.name);
+
+done:
+ return;
+}
+
+/* ------------------------------------------------------------ */
+
+static int musb_gadget_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned long flags;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *hw_ep;
+ void __iomem *regs;
+ struct musb *musb;
+ void __iomem *mbase;
+ u8 epnum;
+ u16 csr;
+ unsigned tmp;
+ int status = -EINVAL;
+
+ if (!ep || !desc)
+ return -EINVAL;
+
+ musb_ep = to_musb_ep(ep);
+ hw_ep = musb_ep->hw_ep;
+ regs = hw_ep->regs;
+ musb = musb_ep->musb;
+ mbase = musb->mregs;
+ epnum = musb_ep->current_epnum;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_ep->desc) {
+ status = -EBUSY;
+ goto fail;
+ }
+ musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* check direction and (later) maxpacket size against endpoint */
+ if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum)
+ goto fail;
+
+ /* REVISIT this rules out high bandwidth periodic transfers */
+ tmp = le16_to_cpu(desc->wMaxPacketSize);
+ if (tmp & ~0x07ff)
+ goto fail;
+ musb_ep->packet_sz = tmp;
+
+ /* enable the interrupts for the endpoint, set the endpoint
+ * packet size (or fail), set the mode, clear the fifo
+ */
+ musb_ep_select(mbase, epnum);
+ if (desc->bEndpointAddress & USB_DIR_IN) {
+ u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep->is_in = 1;
+ if (!musb_ep->is_in)
+ goto fail;
+ if (tmp > hw_ep->max_packet_sz_tx)
+ goto fail;
+
+ int_txe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+ /* REVISIT if can_bulk_split(), use by updating "tmp";
+ * likewise high bandwidth periodic tx
+ */
+ musb_writew(regs, MUSB_TXMAXP, tmp);
+
+ csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
+ if (musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MUSB_TXCSR_P_ISO;
+
+ /* set twice in case of double buffering */
+ musb_writew(regs, MUSB_TXCSR, csr);
+ /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+ musb_writew(regs, MUSB_TXCSR, csr);
+
+ } else {
+ u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep->is_in = 0;
+ if (musb_ep->is_in)
+ goto fail;
+ if (tmp > hw_ep->max_packet_sz_rx)
+ goto fail;
+
+ int_rxe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRRXE, int_rxe);
+
+ /* REVISIT if can_bulk_combine() use by updating "tmp"
+ * likewise high bandwidth periodic rx
+ */
+ musb_writew(regs, MUSB_RXMAXP, tmp);
+
+ /* force shared fifo to OUT-only mode */
+ if (hw_ep->is_shared_fifo) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(regs, MUSB_TXCSR, csr);
+ }
+
+ csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MUSB_RXCSR_P_ISO;
+ else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ /* set twice in case of double buffering */
+ musb_writew(regs, MUSB_RXCSR, csr);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ }
+
+ /* NOTE: all the I/O code _should_ work fine without DMA, in case
+ * for some reason you run out of channels here.
+ */
+ if (is_dma_capable() && musb->dma_controller) {
+ struct dma_controller *c = musb->dma_controller;
+
+ musb_ep->dma = c->channel_alloc(c, hw_ep,
+ (desc->bEndpointAddress & USB_DIR_IN));
+ } else
+ musb_ep->dma = NULL;
+
+ musb_ep->desc = desc;
+ musb_ep->busy = 0;
+ status = 0;
+
+ pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+ musb_driver_name, musb_ep->end_point.name,
+ ({ char *s; switch (musb_ep->type) {
+ case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
+ case USB_ENDPOINT_XFER_INT: s = "int"; break;
+ default: s = "iso"; break;
+ }; s; }),
+ musb_ep->is_in ? "IN" : "OUT",
+ musb_ep->dma ? "dma, " : "",
+ musb_ep->packet_sz);
+
+ schedule_work(&musb->irq_work);
+
+fail:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+/*
+ * Disable an endpoint flushing all requests queued.
+ */
+static int musb_gadget_disable(struct usb_ep *ep)
+{
+ unsigned long flags;
+ struct musb *musb;
+ u8 epnum;
+ struct musb_ep *musb_ep;
+ void __iomem *epio;
+ int status = 0;
+
+ musb_ep = to_musb_ep(ep);
+ musb = musb_ep->musb;
+ epnum = musb_ep->current_epnum;
+ epio = musb->endpoints[epnum].regs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_ep_select(musb->mregs, epnum);
+
+ /* zero the endpoint sizes */
+ if (musb_ep->is_in) {
+ u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
+ int_txe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
+ musb_writew(epio, MUSB_TXMAXP, 0);
+ } else {
+ u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
+ int_rxe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
+ musb_writew(epio, MUSB_RXMAXP, 0);
+ }
+
+ musb_ep->desc = NULL;
+
+ /* abort all pending DMA and requests */
+ nuke(musb_ep, -ESHUTDOWN);
+
+ schedule_work(&musb->irq_work);
+
+ spin_unlock_irqrestore(&(musb->lock), flags);
+
+ DBG(2, "%s\n", musb_ep->end_point.name);
+
+ return status;
+}
+
+/*
+ * Allocate a request for an endpoint.
+ * Reused by ep0 code.
+ */
+struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb_request *request = NULL;
+
+ request = kzalloc(sizeof *request, gfp_flags);
+ if (request) {
+ INIT_LIST_HEAD(&request->request.list);
+ request->request.dma = DMA_ADDR_INVALID;
+ request->epnum = musb_ep->current_epnum;
+ request->ep = musb_ep;
+ }
+
+ return &request->request;
+}
+
+/*
+ * Free a request
+ * Reused by ep0 code.
+ */
+void musb_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(to_musb_request(req));
+}
+
+static LIST_HEAD(buffers);
+
+struct free_record {
+ struct list_head list;
+ struct device *dev;
+ unsigned bytes;
+ dma_addr_t dma;
+};
+
+/*
+ * Context: controller locked, IRQs blocked.
+ */
+static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+{
+ DBG(3, "<== %s request %p len %u on hw_ep%d\n",
+ req->tx ? "TX/IN" : "RX/OUT",
+ &req->request, req->request.length, req->epnum);
+
+ musb_ep_select(musb->mregs, req->epnum);
+ if (req->tx)
+ txstate(musb, req);
+ else
+ rxstate(musb, req);
+}
+
+static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep;
+ struct musb_request *request;
+ struct musb *musb;
+ int status = 0;
+ unsigned long lockflags;
+
+ if (!ep || !req)
+ return -EINVAL;
+ if (!req->buf)
+ return -ENODATA;
+
+ musb_ep = to_musb_ep(ep);
+ musb = musb_ep->musb;
+
+ request = to_musb_request(req);
+ request->musb = musb;
+
+ if (request->ep != musb_ep)
+ return -EINVAL;
+
+ DBG(4, "<== to %s request=%p\n", ep->name, req);
+
+ /* request is mine now... */
+ request->request.actual = 0;
+ request->request.status = -EINPROGRESS;
+ request->epnum = musb_ep->current_epnum;
+ request->tx = musb_ep->is_in;
+
+ if (is_dma_capable() && musb_ep->dma) {
+ if (request->request.dma == DMA_ADDR_INVALID) {
+ request->request.dma = dma_map_single(
+ musb->controller,
+ request->request.buf,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->mapped = 1;
+ } else {
+ dma_sync_single_for_device(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->mapped = 0;
+ }
+ } else if (!req->buf) {
+ return -ENODATA;
+ } else
+ request->mapped = 0;
+
+ spin_lock_irqsave(&musb->lock, lockflags);
+
+ /* don't queue if the ep is down */
+ if (!musb_ep->desc) {
+ DBG(4, "req %p queued to %s while ep %s\n",
+ req, ep->name, "disabled");
+ status = -ESHUTDOWN;
+ goto cleanup;
+ }
+
+ /* add request to the list */
+ list_add_tail(&(request->request.list), &(musb_ep->req_list));
+
+ /* if this is the head of the queue, start i/o ... */
+ if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
+ musb_ep_restart(musb, request);
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, lockflags);
+ return status;
+}
+
+static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct usb_request *r;
+ unsigned long flags;
+ int status = 0;
+ struct musb *musb = musb_ep->musb;
+
+ if (!ep || !request || to_musb_request(request)->ep != musb_ep)
+ return -EINVAL;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ list_for_each_entry(r, &musb_ep->req_list, list) {
+ if (r == request)
+ break;
+ }
+ if (r != request) {
+ DBG(3, "request %p not queued to %s\n", request, ep->name);
+ status = -EINVAL;
+ goto done;
+ }
+
+ /* if the hardware doesn't have the request, easy ... */
+ if (musb_ep->req_list.next != &request->list || musb_ep->busy)
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+
+ /* ... else abort the dma transfer ... */
+ else if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+
+ musb_ep_select(musb->mregs, musb_ep->current_epnum);
+ if (c->channel_abort)
+ status = c->channel_abort(musb_ep->dma);
+ else
+ status = -EBUSY;
+ if (status == 0)
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+ } else {
+ /* NOTE: by sticking to easily tested hardware/driver states,
+ * we leave counting of in-flight packets imprecise.
+ */
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+ }
+
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+/*
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
+ * data but will queue requests.
+ *
+ * exported to ep0 code
+ */
+int musb_gadget_set_halt(struct usb_ep *ep, int value)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ u8 epnum = musb_ep->current_epnum;
+ struct musb *musb = musb_ep->musb;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ void __iomem *mbase;
+ unsigned long flags;
+ u16 csr;
+ struct musb_request *request = NULL;
+ int status = 0;
+
+ if (!ep)
+ return -EINVAL;
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
+ status = -EINVAL;
+ goto done;
+ }
+
+ musb_ep_select(mbase, epnum);
+
+ /* cannot portably stall with non-empty FIFO */
+ request = to_musb_request(next_request(musb_ep));
+ if (value && musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ DBG(3, "%s fifo busy, cannot halt\n", ep->name);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return -EAGAIN;
+ }
+
+ }
+
+ /* set/clear the stall and toggle bits */
+ DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ csr |= MUSB_TXCSR_P_WZC_BITS
+ | MUSB_TXCSR_CLRDATATOG;
+ if (value)
+ csr |= MUSB_TXCSR_P_SENDSTALL;
+ else
+ csr &= ~(MUSB_TXCSR_P_SENDSTALL
+ | MUSB_TXCSR_P_SENTSTALL);
+ csr &= ~MUSB_TXCSR_TXPKTRDY;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_P_WZC_BITS
+ | MUSB_RXCSR_FLUSHFIFO
+ | MUSB_RXCSR_CLRDATATOG;
+ if (value)
+ csr |= MUSB_RXCSR_P_SENDSTALL;
+ else
+ csr &= ~(MUSB_RXCSR_P_SENDSTALL
+ | MUSB_RXCSR_P_SENTSTALL);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+done:
+
+ /* maybe start the first request in the queue */
+ if (!musb_ep->busy && !value && request) {
+ DBG(3, "restarting the request\n");
+ musb_ep_restart(musb, request);
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+static int musb_gadget_fifo_status(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ void __iomem *epio = musb_ep->hw_ep->regs;
+ int retval = -EINVAL;
+
+ if (musb_ep->desc && !musb_ep->is_in) {
+ struct musb *musb = musb_ep->musb;
+ int epnum = musb_ep->current_epnum;
+ void __iomem *mbase = musb->mregs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb_ep_select(mbase, epnum);
+ /* FIXME return zero unless RXPKTRDY is set */
+ retval = musb_readw(epio, MUSB_RXCOUNT);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ return retval;
+}
+
+static void musb_gadget_fifo_flush(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb *musb = musb_ep->musb;
+ u8 epnum = musb_ep->current_epnum;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ void __iomem *mbase;
+ unsigned long flags;
+ u16 csr, int_txe;
+
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_ep_select(mbase, (u8) epnum);
+
+ /* disable interrupts */
+ int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
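+	/* FLUSHFIFO is written twice since, with double buffering, there
+	 * may be a second packet to discard
+	 */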
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static const struct usb_ep_ops musb_ep_ops = {
+ .enable = musb_gadget_enable,
+ .disable = musb_gadget_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .queue = musb_gadget_queue,
+ .dequeue = musb_gadget_dequeue,
+ .set_halt = musb_gadget_set_halt,
+ .fifo_status = musb_gadget_fifo_status,
+ .fifo_flush = musb_gadget_fifo_flush
+};
+
+/* ----------------------------------------------------------------------- */
+
+static int musb_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ return (int)musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ void __iomem *mregs = musb->mregs;
+ unsigned long flags;
+ int status = -EINVAL;
+ u8 power, devctl;
+ int retries;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_PERIPHERAL:
+ /* NOTE: OTG state machine doesn't include B_SUSPENDED;
+ * that's part of the standard usb 1.1 state machine, and
+ * doesn't affect OTG transitions.
+ */
+ if (musb->may_wakeup && musb->is_suspended)
+ break;
+ goto done;
+ case OTG_STATE_B_IDLE:
+ /* Start SRP ... OTG not required. */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ DBG(2, "Sending SRP: devctl: %02x\n", devctl);
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mregs, MUSB_DEVCTL, devctl);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
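+		/* poll until the SESSION bit reads back set, then until the
+		 * core clears it again as SRP completes or times out
+		 */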
+ retries = 100;
+ while (!(devctl & MUSB_DEVCTL_SESSION)) {
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (retries-- < 1)
+ break;
+ }
+ retries = 10000;
+ while (devctl & MUSB_DEVCTL_SESSION) {
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (retries-- < 1)
+ break;
+ }
+
+ /* Block idling for at least 1s */
+ musb_platform_try_idle(musb,
+ jiffies + msecs_to_jiffies(1 * HZ));
+
+ status = 0;
+ goto done;
+ default:
+ DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
+ goto done;
+ }
+
+ status = 0;
+
+ power = musb_readb(mregs, MUSB_POWER);
+ power |= MUSB_POWER_RESUME;
+ musb_writeb(mregs, MUSB_POWER, power);
+ DBG(2, "issue wakeup\n");
+
+ /* FIXME do this next chunk in a timer callback, no udelay */
+ mdelay(2);
+
+ power = musb_readb(mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ musb_writeb(mregs, MUSB_POWER, power);
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+static int
+musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ musb->is_self_powered = !!is_selfpowered;
+ return 0;
+}
+
+static void musb_pullup(struct musb *musb, int is_on)
+{
+ u8 power;
+
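+	/* SOFTCONN drives the D+ pullup, i.e. soft connect/disconnect */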
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ if (is_on)
+ power |= MUSB_POWER_SOFTCONN;
+ else
+ power &= ~MUSB_POWER_SOFTCONN;
+
+ /* FIXME if on, HdrcStart; if off, HdrcStop */
+
+ DBG(3, "gadget %s D+ pullup %s\n",
+ musb->gadget_driver->function, is_on ? "on" : "off");
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+}
+
+#if 0
+static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ DBG(2, "<= %s =>\n", __func__);
+
+ /*
+ * FIXME iff driver's softconnect flag is set (as it is during probe,
+ * though that can clear it), just musb_pullup().
+ */
+
+ return -EINVAL;
+}
+#endif
+
+static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ if (!musb->xceiv.set_power)
+ return -EOPNOTSUPP;
+ return otg_set_power(&musb->xceiv, mA);
+}
+
+static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ unsigned long flags;
+
+ is_on = !!is_on;
+
+ /* NOTE: this assumes we are sensing vbus; we'd rather
+ * not pullup unless the B-session is active.
+ */
+ spin_lock_irqsave(&musb->lock, flags);
+ if (is_on != musb->softconnect) {
+ musb->softconnect = is_on;
+ musb_pullup(musb, is_on);
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static const struct usb_gadget_ops musb_gadget_operations = {
+ .get_frame = musb_gadget_get_frame,
+ .wakeup = musb_gadget_wakeup,
+ .set_selfpowered = musb_gadget_set_self_powered,
+ /* .vbus_session = musb_gadget_vbus_session, */
+ .vbus_draw = musb_gadget_vbus_draw,
+ .pullup = musb_gadget_pullup,
+};
+
+/* ----------------------------------------------------------------------- */
+
+/* Registration */
+
+/* Only this registration code "knows" the rule (from USB standards)
+ * about there being only one external upstream port. It assumes
+ * all peripheral ports are external...
+ */
+static struct musb *the_gadget;
+
+static void musb_gadget_release(struct device *dev)
+{
+ /* kref_put(WHAT) */
+ dev_dbg(dev, "%s\n", __func__);
+}
+
+
+static void __init
+init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
+{
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+
+ memset(ep, 0, sizeof *ep);
+
+ ep->current_epnum = epnum;
+ ep->musb = musb;
+ ep->hw_ep = hw_ep;
+ ep->is_in = is_in;
+
+ INIT_LIST_HEAD(&ep->req_list);
+
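+	/* ep0 and shared-fifo endpoints get a direction-less name ("epN");
+	 * the rest are named "epNin" or "epNout"
+	 */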
+ sprintf(ep->name, "ep%d%s", epnum,
+ (!epnum || hw_ep->is_shared_fifo) ? "" : (
+ is_in ? "in" : "out"));
+ ep->end_point.name = ep->name;
+ INIT_LIST_HEAD(&ep->end_point.ep_list);
+ if (!epnum) {
+ ep->end_point.maxpacket = 64;
+ ep->end_point.ops = &musb_g_ep0_ops;
+ musb->g.ep0 = &ep->end_point;
+ } else {
+ if (is_in)
+ ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
+ else
+ ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
+ ep->end_point.ops = &musb_ep_ops;
+ list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
+ }
+}
+
+/*
+ * Initialize the endpoints exposed to peripheral drivers, with backlinks
+ * to the rest of the driver state.
+ */
+static inline void __init musb_g_init_endpoints(struct musb *musb)
+{
+ u8 epnum;
+ struct musb_hw_ep *hw_ep;
+ unsigned count = 0;
+
+	/* initialize endpoint list just once */
+ INIT_LIST_HEAD(&(musb->g.ep_list));
+
+ for (epnum = 0, hw_ep = musb->endpoints;
+ epnum < musb->nr_endpoints;
+ epnum++, hw_ep++) {
+ if (hw_ep->is_shared_fifo /* || !epnum */) {
+ init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
+ count++;
+ } else {
+ if (hw_ep->max_packet_sz_tx) {
+ init_peripheral_ep(musb, &hw_ep->ep_in,
+ epnum, 1);
+ count++;
+ }
+ if (hw_ep->max_packet_sz_rx) {
+ init_peripheral_ep(musb, &hw_ep->ep_out,
+ epnum, 0);
+ count++;
+ }
+ }
+ }
+}
+
+/* called once during driver setup to initialize and link into
+ * the driver model; memory is zeroed.
+ */
+int __init musb_gadget_setup(struct musb *musb)
+{
+ int status;
+
+ /* REVISIT minor race: if (erroneously) setting up two
+ * musb peripherals at the same time, only the bus lock
+ * is probably held.
+ */
+ if (the_gadget)
+ return -EBUSY;
+ the_gadget = musb;
+
+ musb->g.ops = &musb_gadget_operations;
+ musb->g.is_dualspeed = 1;
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ /* this "gadget" abstracts/virtualizes the controller */
+ strcpy(musb->g.dev.bus_id, "gadget");
+ musb->g.dev.parent = musb->controller;
+ musb->g.dev.dma_mask = musb->controller->dma_mask;
+ musb->g.dev.release = musb_gadget_release;
+ musb->g.name = musb_driver_name;
+
+ if (is_otg_enabled(musb))
+ musb->g.is_otg = 1;
+
+ musb_g_init_endpoints(musb);
+
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, 0);
+
+ status = device_register(&musb->g.dev);
+ if (status != 0)
+ the_gadget = NULL;
+ return status;
+}
+
+void musb_gadget_cleanup(struct musb *musb)
+{
+ if (musb != the_gadget)
+ return;
+
+ device_unregister(&musb->g.dev);
+ the_gadget = NULL;
+}
+
+/*
+ * Register the gadget driver. Used by gadget drivers when
+ * registering themselves with the controller.
+ *
+ * -EINVAL something went wrong (not driver)
+ * -EBUSY another gadget is already using the controller
+ * -ENOMEM no memory to perform the operation
+ *
+ * @param driver the gadget driver
+ * @return <0 if error, 0 if everything is fine
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ int retval;
+ unsigned long flags;
+ struct musb *musb = the_gadget;
+
+ if (!driver
+ || driver->speed != USB_SPEED_HIGH
+ || !driver->bind
+ || !driver->setup)
+ return -EINVAL;
+
+ /* driver must be initialized to support peripheral mode */
+ if (!musb || !(musb->board_mode == MUSB_OTG
+ || musb->board_mode != MUSB_OTG)) {
+ DBG(1, "%s, no dev??\n", __func__);
+ return -ENODEV;
+ }
+
+ DBG(3, "registering driver %s\n", driver->function);
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb->gadget_driver) {
+ DBG(1, "%s is already bound to %s\n",
+ musb_driver_name,
+ musb->gadget_driver->driver.name);
+ retval = -EBUSY;
+ } else {
+ musb->gadget_driver = driver;
+ musb->g.dev.driver = &driver->driver;
+ driver->driver.bus = NULL;
+ musb->softconnect = 1;
+ retval = 0;
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
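+	/* call bind() without the controller lock held; if it fails,
+	 * undo the registration done above
+	 */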
+ if (retval == 0) {
+ retval = driver->bind(&musb->g);
+ if (retval != 0) {
+ DBG(3, "bind to driver %s failed --> %d\n",
+ driver->driver.name, retval);
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* REVISIT always use otg_set_peripheral(), handling
+ * issues including the root hub one below ...
+ */
+ musb->xceiv.gadget = &musb->g;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->is_active = 1;
+
+ /* FIXME this ignores the softconnect flag. Drivers are
+	 * allowed to hold the peripheral inactive until, for example,
+ * userspace hooks up printer hardware or DSP codecs, so
+ * hosts only see fully functional devices.
+ */
+
+ if (!is_otg_enabled(musb))
+ musb_start(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (is_otg_enabled(musb)) {
+ DBG(3, "OTG startup...\n");
+
+ /* REVISIT: funcall to other code, which also
+ * handles power budgeting ... this way also
+ * ensures HdrcStart is indirectly called.
+ */
+ retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+ if (retval < 0) {
+ DBG(1, "add_hcd failed, %d\n", retval);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->xceiv.gadget = NULL;
+ musb->xceiv.state = OTG_STATE_UNDEFINED;
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ }
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
+{
+ int i;
+ struct musb_hw_ep *hw_ep;
+
+ /* don't disconnect if it's not connected */
+ if (musb->g.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ else
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ /* deactivate the hardware */
+ if (musb->softconnect) {
+ musb->softconnect = 0;
+ musb_pullup(musb, 0);
+ }
+ musb_stop(musb);
+
+ /* killing any outstanding requests will quiesce the driver;
+ * then report disconnect
+ */
+ if (driver) {
+ for (i = 0, hw_ep = musb->endpoints;
+ i < musb->nr_endpoints;
+ i++, hw_ep++) {
+ musb_ep_select(musb->mregs, i);
+ if (hw_ep->is_shared_fifo /* || !epnum */) {
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ } else {
+ if (hw_ep->max_packet_sz_tx)
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ if (hw_ep->max_packet_sz_rx)
+ nuke(&hw_ep->ep_out, -ESHUTDOWN);
+ }
+ }
+
+ spin_unlock(&musb->lock);
+ driver->disconnect(&musb->g);
+ spin_lock(&musb->lock);
+ }
+}
+
+/*
+ * Unregister the gadget driver. Used by gadget drivers when
+ * unregistering themselves from the controller.
+ *
+ * @param driver the gadget driver to unregister
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ unsigned long flags;
+ int retval = 0;
+ struct musb *musb = the_gadget;
+
+ if (!driver || !driver->unbind || !musb)
+ return -EINVAL;
+
+ /* REVISIT always use otg_set_peripheral() here too;
+ * this needs to shut down the OTG engine.
+ */
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+#ifdef CONFIG_USB_MUSB_OTG
+ musb_hnp_stop(musb);
+#endif
+
+ if (musb->gadget_driver == driver) {
+
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+ musb->xceiv.state = OTG_STATE_UNDEFINED;
+ stop_activity(musb, driver);
+
+ DBG(3, "unregistering driver %s\n", driver->function);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ driver->unbind(&musb->g);
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, 0);
+ } else
+ retval = -EINVAL;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (is_otg_enabled(musb) && retval == 0) {
+ usb_remove_hcd(musb_to_hcd(musb));
+ /* FIXME we need to be able to register another
+ * gadget driver here and have everything work;
+ * that currently misbehaves.
+ */
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/* ----------------------------------------------------------------------- */
+
+/* lifecycle operations called through plat_uds.c */
+
+void musb_g_resume(struct musb *musb)
+{
+ musb->is_suspended = 0;
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_IDLE:
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ musb->is_active = 1;
+ if (musb->gadget_driver && musb->gadget_driver->resume) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->resume(&musb->g);
+ spin_lock(&musb->lock);
+ }
+ break;
+ default:
+ WARNING("unhandled RESUME transition (%s)\n",
+ otg_state_string(musb));
+ }
+}
+
+/* called when SOF packets stop for 3+ msec */
+void musb_g_suspend(struct musb *musb)
+{
+ u8 devctl;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ DBG(3, "devctl %02x\n", devctl);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_IDLE:
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ musb->is_suspended = 1;
+ if (musb->gadget_driver && musb->gadget_driver->suspend) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->suspend(&musb->g);
+ spin_lock(&musb->lock);
+ }
+ break;
+ default:
+ /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
+ * A_PERIPHERAL may need care too
+ */
+ WARNING("unhandled SUSPEND transition (%s)\n",
+ otg_state_string(musb));
+ }
+}
+
+/* Called during SRP */
+void musb_g_wakeup(struct musb *musb)
+{
+ musb_gadget_wakeup(&musb->g);
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void musb_g_disconnect(struct musb *musb)
+{
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+
+ DBG(3, "devctl %02x\n", devctl);
+
+ /* clear HR */
+ musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
+
+ /* don't draw vbus until new b-default session */
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+ musb->g.speed = USB_SPEED_UNKNOWN;
+ if (musb->gadget_driver && musb->gadget_driver->disconnect) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->disconnect(&musb->g);
+ spin_lock(&musb->lock);
+ }
+
+ switch (musb->xceiv.state) {
+ default:
+#ifdef CONFIG_USB_MUSB_OTG
+ DBG(2, "Unhandled disconnect %s, setting a_idle\n",
+ otg_state_string(musb));
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_HOST:
+#endif
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_B_SRP_INIT:
+ break;
+ }
+
+ musb->is_active = 0;
+}
+
+void musb_g_reset(struct musb *musb)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
+ u8 power;
+
+ DBG(3, "<== %s addr=%x driver '%s'\n",
+ (devctl & MUSB_DEVCTL_BDEVICE)
+ ? "B-Device" : "A-Device",
+ musb_readb(mbase, MUSB_FADDR),
+ musb->gadget_driver
+ ? musb->gadget_driver->driver.name
+ : NULL
+ );
+
+ /* report disconnect, if we didn't already (flushing EP state) */
+ if (musb->g.speed != USB_SPEED_UNKNOWN)
+ musb_g_disconnect(musb);
+
+ /* clear HR */
+ else if (devctl & MUSB_DEVCTL_HR)
+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+
+
+ /* what speed did we negotiate? */
+ power = musb_readb(mbase, MUSB_POWER);
+ musb->g.speed = (power & MUSB_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ /* start in USB_STATE_DEFAULT */
+ musb->is_active = 1;
+ musb->is_suspended = 0;
+ MUSB_DEV_MODE(musb);
+ musb->address = 0;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+
+ musb->may_wakeup = 0;
+ musb->g.b_hnp_enable = 0;
+ musb->g.a_alt_hnp_support = 0;
+ musb->g.a_hnp_support = 0;
+
+ /* Normal reset, as B-Device;
+ * or else after HNP, as A-Device
+ */
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->g.is_a_peripheral = 0;
+ } else if (is_otg_enabled(musb)) {
+ musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+ musb->g.is_a_peripheral = 1;
+ } else
+ WARN_ON(1);
+
+ /* start with default limits on VBUS power draw */
+ (void) musb_gadget_vbus_draw(&musb->g,
+ is_otg_enabled(musb) ? 8 : 100);
+}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
new file mode 100644
index 000000000000..59502da9f739
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.h
@@ -0,0 +1,108 @@
+/*
+ * MUSB OTG driver peripheral defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_GADGET_H
+#define __MUSB_GADGET_H
+
+struct musb_request {
+ struct usb_request request;
+ struct musb_ep *ep;
+ struct musb *musb;
+ u8 tx; /* endpoint direction */
+ u8 epnum;
+ u8 mapped;
+};
+
+static inline struct musb_request *to_musb_request(struct usb_request *req)
+{
+ return req ? container_of(req, struct musb_request, request) : NULL;
+}
+
+extern struct usb_request *
+musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
+
+
+/*
+ * struct musb_ep - peripheral side view of endpoint rx or tx side
+ */
+struct musb_ep {
+ /* stuff towards the head is basically write-once. */
+ struct usb_ep end_point;
+ char name[12];
+ struct musb_hw_ep *hw_ep;
+ struct musb *musb;
+ u8 current_epnum;
+
+ /* ... when enabled/disabled ... */
+ u8 type;
+ u8 is_in;
+ u16 packet_sz;
+ const struct usb_endpoint_descriptor *desc;
+ struct dma_channel *dma;
+
+ /* later things are modified based on usage */
+ struct list_head req_list;
+
+ /* true if lock must be dropped but req_list may not be advanced */
+ u8 busy;
+};
+
+static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
+{
+ return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
+}
+
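+/* peek at the request at the head of the endpoint's queue, if any */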
+static inline struct usb_request *next_request(struct musb_ep *ep)
+{
+ struct list_head *queue = &ep->req_list;
+
+ if (list_empty(queue))
+ return NULL;
+ return container_of(queue->next, struct usb_request, list);
+}
+
+extern void musb_g_tx(struct musb *musb, u8 epnum);
+extern void musb_g_rx(struct musb *musb, u8 epnum);
+
+extern const struct usb_ep_ops musb_g_ep0_ops;
+
+extern int musb_gadget_setup(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+
+extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+
+extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
+
+#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
new file mode 100644
index 000000000000..48d7d3ccb243
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -0,0 +1,981 @@
+/*
+ * MUSB OTG peripheral driver ep0 handling
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include "musb_core.h"
+
+/* ep0 is always musb->endpoints[0].ep_in */
+#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
+
+/*
+ * locking note: we use only the controller lock, for simpler correctness.
+ * It's always held with IRQs blocked.
+ *
+ * It protects the ep0 request queue as well as ep0_state, not just the
+ * controller and indexed registers. And that lock stays held unless it
+ * needs to be dropped to allow reentering this driver ... like upcalls to
+ * the gadget driver, or adjusting endpoint halt status.
+ */
+
+static char *decode_ep0stage(u8 stage)
+{
+ switch (stage) {
+ case MUSB_EP0_STAGE_SETUP: return "idle";
+ case MUSB_EP0_STAGE_TX: return "in";
+ case MUSB_EP0_STAGE_RX: return "out";
+ case MUSB_EP0_STAGE_ACKWAIT: return "wait";
+ case MUSB_EP0_STAGE_STATUSIN: return "in/status";
+ case MUSB_EP0_STAGE_STATUSOUT: return "out/status";
+ default: return "?";
+ }
+}
+
+/* handle a standard GET_STATUS request
+ * Context: caller holds controller lock
+ */
+static int service_tx_status_request(
+ struct musb *musb,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ void __iomem *mbase = musb->mregs;
+ int handled = 1;
+ u8 result[2], epnum = 0;
+ const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+ result[1] = 0;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
+ result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+#ifdef CONFIG_USB_MUSB_OTG
+ if (musb->g.is_otg) {
+ result[0] |= musb->g.b_hnp_enable
+ << USB_DEVICE_B_HNP_ENABLE;
+ result[0] |= musb->g.a_alt_hnp_support
+ << USB_DEVICE_A_ALT_HNP_SUPPORT;
+ result[0] |= musb->g.a_hnp_support
+ << USB_DEVICE_A_HNP_SUPPORT;
+ }
+#endif
+ break;
+
+ case USB_RECIP_INTERFACE:
+ result[0] = 0;
+ break;
+
+ case USB_RECIP_ENDPOINT: {
+ int is_in;
+ struct musb_ep *ep;
+ u16 tmp;
+ void __iomem *regs;
+
+ epnum = (u8) ctrlrequest->wIndex;
+ if (!epnum) {
+ result[0] = 0;
+ break;
+ }
+
+ is_in = epnum & USB_DIR_IN;
+ if (is_in) {
+ epnum &= 0x0f;
+ ep = &musb->endpoints[epnum].ep_in;
+ } else {
+ ep = &musb->endpoints[epnum].ep_out;
+ }
+ regs = musb->endpoints[epnum].regs;
+
+ if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
+ handled = -EINVAL;
+ break;
+ }
+
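+		/* report the ENDPOINT_HALT feature from the SENDSTALL bit */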
+ musb_ep_select(mbase, epnum);
+ if (is_in)
+ tmp = musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_P_SENDSTALL;
+ else
+ tmp = musb_readw(regs, MUSB_RXCSR)
+ & MUSB_RXCSR_P_SENDSTALL;
+ musb_ep_select(mbase, 0);
+
+ result[0] = tmp ? 1 : 0;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+
+ /* fill up the fifo; caller updates csr0 */
+ if (handled > 0) {
+ u16 len = le16_to_cpu(ctrlrequest->wLength);
+
+ if (len > 2)
+ len = 2;
+ musb_write_fifo(&musb->endpoints[0], len, result);
+ }
+
+ return handled;
+}
+
+/*
+ * Handle a control-IN request: the ep0 buffer contains the current request,
+ * which is expected to be a standard control request. Assumes the fifo is
+ * at least 2 bytes long.
+ *
+ * @return 0 if the request was NOT HANDLED,
+ * < 0 when error
+ * > 0 when the request is processed
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+{
+ int handled = 0; /* not handled */
+
+ if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (ctrlrequest->bRequest) {
+ case USB_REQ_GET_STATUS:
+ handled = service_tx_status_request(musb,
+ ctrlrequest);
+ break;
+
+ /* case USB_REQ_SYNC_FRAME: */
+
+ default:
+ break;
+ }
+ }
+ return handled;
+}
+
+/*
+ * Context: caller holds controller lock
+ */
+static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
+{
+ musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+}
+
+/*
+ * Tries to start B-device HNP negotiation if enabled via sysfs
+ */
+static inline void musb_try_b_hnp_enable(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 devctl;
+
+ DBG(1, "HNP: Setting HR\n");
+ devctl = musb_readb(mbase, MUSB_DEVCTL);
+ musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
+}
+
+/*
+ * Handle all control requests with no DATA stage, including standard
+ * requests such as:
+ * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
+ * always delegated to the gadget driver
+ * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
+ * always handled here, except for class/vendor/... features
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_zero_data_request(struct musb *musb,
+ struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ int handled = -EINVAL;
+ void __iomem *mbase = musb->mregs;
+ const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+ /* the gadget driver handles everything except what we MUST handle */
+ if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (ctrlrequest->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* change it after the status stage */
+ musb->set_address = true;
+ musb->address = (u8) (ctrlrequest->wValue & 0x7f);
+ handled = 1;
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ if (ctrlrequest->wValue
+ != USB_DEVICE_REMOTE_WAKEUP)
+ break;
+ musb->may_wakeup = 0;
+ handled = 1;
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:{
+ const u8 num = ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+
+ if (num == 0
+ || num >= MUSB_C_NUM_EPS
+ || ctrlrequest->wValue
+ != USB_ENDPOINT_HALT)
+ break;
+
+ if (ctrlrequest->wIndex & USB_DIR_IN)
+ musb_ep = &musb->endpoints[num].ep_in;
+ else
+ musb_ep = &musb->endpoints[num].ep_out;
+ if (!musb_ep->desc)
+ break;
+
+ /* REVISIT do it directly, no locking games */
+ spin_unlock(&musb->lock);
+ musb_gadget_set_halt(&musb_ep->end_point, 0);
+ spin_lock(&musb->lock);
+
+ /* select ep0 again */
+ musb_ep_select(mbase, 0);
+ handled = 1;
+ } break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ handled = 1;
+ switch (ctrlrequest->wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ musb->may_wakeup = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (musb->g.speed != USB_SPEED_HIGH)
+ goto stall;
+ if (ctrlrequest->wIndex & 0xff)
+ goto stall;
+
+ switch (ctrlrequest->wIndex >> 8) {
+ case 1:
+ pr_debug("TEST_J\n");
+ /* TEST_J */
+ musb->test_mode_nr =
+ MUSB_TEST_J;
+ break;
+ case 2:
+ /* TEST_K */
+ pr_debug("TEST_K\n");
+ musb->test_mode_nr =
+ MUSB_TEST_K;
+ break;
+ case 3:
+ /* TEST_SE0_NAK */
+ pr_debug("TEST_SE0_NAK\n");
+ musb->test_mode_nr =
+ MUSB_TEST_SE0_NAK;
+ break;
+ case 4:
+ /* TEST_PACKET */
+ pr_debug("TEST_PACKET\n");
+ musb->test_mode_nr =
+ MUSB_TEST_PACKET;
+ break;
+ default:
+ goto stall;
+ }
+
+ /* enter test mode after irq */
+ if (handled > 0)
+ musb->test_mode = true;
+ break;
+#ifdef CONFIG_USB_MUSB_OTG
+ case USB_DEVICE_B_HNP_ENABLE:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.b_hnp_enable = 1;
+ musb_try_b_hnp_enable(musb);
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.a_hnp_support = 1;
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.a_alt_hnp_support = 1;
+ break;
+#endif
+stall:
+ default:
+ handled = -EINVAL;
+ break;
+ }
+ break;
+
+ case USB_RECIP_INTERFACE:
+ break;
+
+ case USB_RECIP_ENDPOINT:{
+ const u8 epnum =
+ ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *ep;
+ void __iomem *regs;
+ int is_in;
+ u16 csr;
+
+ if (epnum == 0
+ || epnum >= MUSB_C_NUM_EPS
+ || ctrlrequest->wValue
+ != USB_ENDPOINT_HALT)
+ break;
+
+ ep = musb->endpoints + epnum;
+ regs = ep->regs;
+ is_in = ctrlrequest->wIndex & USB_DIR_IN;
+ if (is_in)
+ musb_ep = &ep->ep_in;
+ else
+ musb_ep = &ep->ep_out;
+ if (!musb_ep->desc)
+ break;
+
+ musb_ep_select(mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs,
+ MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ csr |= MUSB_TXCSR_P_SENDSTALL
+ | MUSB_TXCSR_CLRDATATOG
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_TXCSR,
+ csr);
+ } else {
+ csr = musb_readw(regs,
+ MUSB_RXCSR);
+ csr |= MUSB_RXCSR_P_SENDSTALL
+ | MUSB_RXCSR_FLUSHFIFO
+ | MUSB_RXCSR_CLRDATATOG
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR,
+ csr);
+ }
+
+ /* select ep0 again */
+ musb_ep_select(mbase, 0);
+ handled = 1;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+ default:
+ /* delegate SET_CONFIGURATION, etc */
+ handled = 0;
+ }
+ } else
+ handled = 0;
+ return handled;
+}
+
+/* we have an ep0out data packet
+ * Context: caller holds controller lock
+ */
+static void ep0_rxstate(struct musb *musb)
+{
+ void __iomem *regs = musb->control_ep->regs;
+ struct usb_request *req;
+ u16 tmp;
+
+ req = next_ep0_request(musb);
+
+ /* read packet and ack; or stall because of gadget driver bug:
+ * should have provided the rx buffer before setup() returned.
+ */
+ if (req) {
+ void *buf = req->buf + req->actual;
+ unsigned len = req->length - req->actual;
+
+ /* read the buffer */
+ tmp = musb_readb(regs, MUSB_COUNT0);
+ if (tmp > len) {
+ req->status = -EOVERFLOW;
+ tmp = len;
+ }
+ musb_read_fifo(&musb->endpoints[0], tmp, buf);
+ req->actual += tmp;
+		/* a short packet, or a completely filled request, ends the
+		 * data stage; test the byte count before tmp is reused for
+		 * csr0 bits
+		 */
+		if (tmp < 64 || req->actual == req->length) {
+			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+			tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_DATAEND;
+		} else {
+			tmp = MUSB_CSR0_P_SVDRXPKTRDY;
+			req = NULL;
+		}
+ } else
+ tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
+
+
+ /* Completion handler may choose to stall, e.g. because the
+ * message just received holds invalid data.
+ */
+ if (req) {
+ musb->ackpend = tmp;
+ musb_g_ep0_giveback(musb, req);
+ if (!musb->ackpend)
+ return;
+ musb->ackpend = 0;
+ }
+ musb_writew(regs, MUSB_CSR0, tmp);
+}
+
+/*
+ * Transmit data to the host (IN); this code might be called both from IRQ
+ * context and from a kernel thread.
+ *
+ * Context: caller holds controller lock
+ */
+static void ep0_txstate(struct musb *musb)
+{
+ void __iomem *regs = musb->control_ep->regs;
+ struct usb_request *request = next_ep0_request(musb);
+ u16 csr = MUSB_CSR0_TXPKTRDY;
+ u8 *fifo_src;
+ u8 fifo_count;
+
+ if (!request) {
+ /* WARN_ON(1); */
+ DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+ return;
+ }
+
+ /* load the data */
+ fifo_src = (u8 *) request->buf + request->actual;
+ fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
+ request->length - request->actual);
+ musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
+ request->actual += fifo_count;
+
+ /* update the flags */
+ if (fifo_count < MUSB_MAX_END0_PACKET
+ || request->actual == request->length) {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+ csr |= MUSB_CSR0_P_DATAEND;
+ } else
+ request = NULL;
+
+ /* report completions as soon as the fifo's loaded; there's no
+ * win in waiting till this last packet gets acked. (other than
+ * very precise fault reporting, needed by USB TMC; possible with
+ * this hardware, but not usable from portable gadget drivers.)
+ */
+ if (request) {
+ musb->ackpend = csr;
+ musb_g_ep0_giveback(musb, request);
+ if (!musb->ackpend)
+ return;
+ musb->ackpend = 0;
+ }
+
+ /* send it out, triggering a "txpktrdy cleared" irq */
+ musb_writew(regs, MUSB_CSR0, csr);
+}
+
+/*
+ * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
+ * Fields are left in USB byte-order.
+ *
+ * Context: caller holds controller lock.
+ */
+static void
+musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
+{
+ struct usb_request *r;
+ void __iomem *regs = musb->control_ep->regs;
+
+ musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
+
+ /* NOTE: earlier 2.6 versions changed setup packets to host
+ * order, but now USB packets always stay in USB byte order.
+ */
+ DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+ req->bRequestType,
+ req->bRequest,
+ le16_to_cpu(req->wValue),
+ le16_to_cpu(req->wIndex),
+ le16_to_cpu(req->wLength));
+
+ /* clean up any leftover transfers */
+ r = next_ep0_request(musb);
+ if (r)
+ musb_g_ep0_giveback(musb, r);
+
+ /* For zero-data requests we want to delay the STATUS stage to
+ * avoid SETUPEND errors. If we read data (OUT), delay accepting
+ * packets until there's a buffer to store them in.
+ *
+ * If we write data, the controller acts happier if we enable
+ * the TX FIFO right away, and give the controller a moment
+ * to switch modes...
+ */
+ musb->set_address = false;
+ musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
+ if (req->wLength == 0) {
+ if (req->bRequestType & USB_DIR_IN)
+ musb->ackpend |= MUSB_CSR0_TXPKTRDY;
+ musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
+ } else if (req->bRequestType & USB_DIR_IN) {
+ musb->ep0_state = MUSB_EP0_STAGE_TX;
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
+ while ((musb_readw(regs, MUSB_CSR0)
+ & MUSB_CSR0_RXPKTRDY) != 0)
+ cpu_relax();
+ musb->ackpend = 0;
+ } else
+ musb->ep0_state = MUSB_EP0_STAGE_RX;
+}
+
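+/* hand a SETUP packet to the gadget driver; the controller lock is dropped
+ * around the upcall, since setup() may call back into this driver
+ */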
+static int
+forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ int retval;
+ if (!musb->gadget_driver)
+ return -EOPNOTSUPP;
+ spin_unlock(&musb->lock);
+ retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+ spin_lock(&musb->lock);
+ return retval;
+}
+
+/*
+ * Handle peripheral ep0 interrupt
+ *
+ * Context: irq handler; we won't re-enter the driver that way.
+ */
+irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+ u16 csr;
+ u16 len;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *regs = musb->endpoints[0].regs;
+ irqreturn_t retval = IRQ_NONE;
+
+ musb_ep_select(mbase, 0); /* select ep0 */
+ csr = musb_readw(regs, MUSB_CSR0);
+ len = musb_readb(regs, MUSB_COUNT0);
+
+ DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
+ csr, len,
+ musb_readb(mbase, MUSB_FADDR),
+ decode_ep0stage(musb->ep0_state));
+
+ /* I sent a stall.. need to acknowledge it now.. */
+ if (csr & MUSB_CSR0_P_SENTSTALL) {
+ musb_writew(regs, MUSB_CSR0,
+ csr & ~MUSB_CSR0_P_SENTSTALL);
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ csr = musb_readw(regs, MUSB_CSR0);
+ }
+
+ /* request ended "early" */
+ if (csr & MUSB_CSR0_P_SETUPEND) {
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ csr = musb_readw(regs, MUSB_CSR0);
+ /* NOTE: request may need completion */
+ }
+
+ /* docs from Mentor only describe tx, rx, and idle/setup states.
+ * we need to handle nuances around status stages, and also the
+ * case where status and setup stages come back-to-back ...
+ */
+ switch (musb->ep0_state) {
+
+ case MUSB_EP0_STAGE_TX:
+ /* irq on clearing txpktrdy */
+ if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
+ ep0_txstate(musb);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MUSB_EP0_STAGE_RX:
+ /* irq on set rxpktrdy */
+ if (csr & MUSB_CSR0_RXPKTRDY) {
+ ep0_rxstate(musb);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MUSB_EP0_STAGE_STATUSIN:
+ /* end of sequence #2 (OUT/RX state) or #3 (no data) */
+
+ /* update address (if needed) only @ the end of the
+ * status phase per usb spec, which also guarantees
+ * we get 10 msec to receive this irq... until this
+ * is done we won't see the next packet.
+ */
+ if (musb->set_address) {
+ musb->set_address = false;
+ musb_writeb(mbase, MUSB_FADDR, musb->address);
+ }
+
+ /* enter test mode if needed (exit by reset) */
+ else if (musb->test_mode) {
+ DBG(1, "entering TESTMODE\n");
+
+ if (MUSB_TEST_PACKET == musb->test_mode_nr)
+ musb_load_testpacket(musb);
+
+ musb_writeb(mbase, MUSB_TESTMODE,
+ musb->test_mode_nr);
+ }
+ /* FALLTHROUGH */
+
+ case MUSB_EP0_STAGE_STATUSOUT:
+ /* end of sequence #1: write to host (TX state) */
+ {
+ struct usb_request *req;
+
+ req = next_ep0_request(musb);
+ if (req)
+ musb_g_ep0_giveback(musb, req);
+ }
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ /* FALLTHROUGH */
+
+ case MUSB_EP0_STAGE_SETUP:
+ if (csr & MUSB_CSR0_RXPKTRDY) {
+ struct usb_ctrlrequest setup;
+ int handled = 0;
+
+ if (len != 8) {
+ ERR("SETUP packet len %d != 8 ?\n", len);
+ break;
+ }
+ musb_read_setup(musb, &setup);
+ retval = IRQ_HANDLED;
+
+ /* sometimes the RESET won't be reported */
+ if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
+ u8 power;
+
+ printk(KERN_NOTICE "%s: peripheral reset "
+ "irq lost!\n",
+ musb_driver_name);
+ power = musb_readb(mbase, MUSB_POWER);
+ musb->g.speed = (power & MUSB_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ }
+
+ switch (musb->ep0_state) {
+
+ /* sequence #3 (no data stage), includes requests
+ * we can't forward (notably SET_ADDRESS and the
+ * device/endpoint feature set/clear operations)
+ * plus SET_CONFIGURATION and others we must
+ */
+ case MUSB_EP0_STAGE_ACKWAIT:
+ handled = service_zero_data_request(
+ musb, &setup);
+
+ /* status stage might be immediate */
+ if (handled > 0) {
+ musb->ackpend |= MUSB_CSR0_P_DATAEND;
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSIN;
+ }
+ break;
+
+ /* sequence #1 (IN to host), includes GET_STATUS
+ * requests that we can't forward, GET_DESCRIPTOR
+ * and others that we must
+ */
+ case MUSB_EP0_STAGE_TX:
+ handled = service_in_request(musb, &setup);
+ if (handled > 0) {
+ musb->ackpend = MUSB_CSR0_TXPKTRDY
+ | MUSB_CSR0_P_DATAEND;
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSOUT;
+ }
+ break;
+
+ /* sequence #2 (OUT from host), always forward */
+ default: /* MUSB_EP0_STAGE_RX */
+ break;
+ }
+
+ DBG(3, "handled %d, csr %04x, ep0stage %s\n",
+ handled, csr,
+ decode_ep0stage(musb->ep0_state));
+
+ /* unless we need to delegate this to the gadget
+ * driver, we know how to wrap this up: csr0 has
+ * not yet been written.
+ */
+ if (handled < 0)
+ goto stall;
+ else if (handled > 0)
+ goto finish;
+
+ handled = forward_to_driver(musb, &setup);
+ if (handled < 0) {
+ musb_ep_select(mbase, 0);
+stall:
+ DBG(3, "stall (%d)\n", handled);
+ musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+finish:
+ musb_writew(regs, MUSB_CSR0,
+ musb->ackpend);
+ musb->ackpend = 0;
+ }
+ }
+ break;
+
+ case MUSB_EP0_STAGE_ACKWAIT:
+ /* This should not happen. But happens with tusb6010 with
+ * g_file_storage and high speed. Do nothing.
+ */
+ retval = IRQ_HANDLED;
+ break;
+
+ default:
+ /* "can't happen" */
+ WARN_ON(1);
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ break;
+ }
+
+ return retval;
+}
+
+
+static int
+musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int musb_g_ep0_disable(struct usb_ep *e)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int
+musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
+{
+ struct musb_ep *ep;
+ struct musb_request *req;
+ struct musb *musb;
+ int status;
+ unsigned long lockflags;
+ void __iomem *regs;
+
+ if (!e || !r)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->musb;
+ regs = musb->control_ep->regs;
+
+ req = to_musb_request(r);
+ req->musb = musb;
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+ req->tx = ep->is_in;
+
+ spin_lock_irqsave(&musb->lock, lockflags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
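+	/* requests are only accepted while a data stage (or the zero-length
+	 * ACKWAIT stage) of a control transfer is in progress
+	 */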
+ switch (musb->ep0_state) {
+ case MUSB_EP0_STAGE_RX: /* control-OUT data */
+ case MUSB_EP0_STAGE_TX: /* control-IN data */
+ case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */
+ status = 0;
+ break;
+ default:
+ DBG(1, "ep0 request queued in state %d\n",
+ musb->ep0_state);
+ status = -EINVAL;
+ goto cleanup;
+ }
+
+ /* add request to the list */
+ list_add_tail(&(req->request.list), &(ep->req_list));
+
+ DBG(3, "queue to %s (%s), length=%d\n",
+ ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
+ req->request.length);
+
+ musb_ep_select(musb->mregs, 0);
+
+ /* sequence #1, IN ... start writing the data */
+ if (musb->ep0_state == MUSB_EP0_STAGE_TX)
+ ep0_txstate(musb);
+
+ /* sequence #3, no-data ... issue IN status */
+ else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
+ if (req->request.length)
+ status = -EINVAL;
+ else {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ musb_writew(regs, MUSB_CSR0,
+ musb->ackpend | MUSB_CSR0_P_DATAEND);
+ musb->ackpend = 0;
+ musb_g_ep0_giveback(ep->musb, r);
+ }
+
+ /* else for sequence #2 (OUT), caller provides a buffer
+ * before the next packet arrives. deferred responses
+ * (after SETUP is acked) are racey.
+ */
+ } else if (musb->ackpend) {
+ musb_writew(regs, MUSB_CSR0, musb->ackpend);
+ musb->ackpend = 0;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, lockflags);
+ return status;
+}
+
+static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ /* we just won't support this */
+ return -EINVAL;
+}
+
+static int musb_g_ep0_halt(struct usb_ep *e, int value)
+{
+ struct musb_ep *ep;
+ struct musb *musb;
+ void __iomem *base, *regs;
+ unsigned long flags;
+ int status;
+ u16 csr;
+
+ if (!e || !value)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->musb;
+ base = musb->mregs;
+ regs = musb->control_ep->regs;
+ status = 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ musb_ep_select(base, 0);
+ csr = musb->ackpend;
+
+ switch (musb->ep0_state) {
+
+ /* Stalls are usually issued after parsing SETUP packet, either
+ * directly in irq context from setup() or else later.
+ */
+ case MUSB_EP0_STAGE_TX: /* control-IN data */
+ case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */
+ case MUSB_EP0_STAGE_RX: /* control-OUT data */
+ csr = musb_readw(regs, MUSB_CSR0);
+ /* FALLTHROUGH */
+
+ /* It's also OK to issue stalls during callbacks when a non-empty
+ * DATA stage buffer has been read (or even written).
+ */
+ case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */
+ case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */
+
+ csr |= MUSB_CSR0_P_SENDSTALL;
+ musb_writew(regs, MUSB_CSR0, csr);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ackpend = 0;
+ break;
+ default:
+ DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
+ status = -EINVAL;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+const struct usb_ep_ops musb_g_ep0_ops = {
+ .enable = musb_g_ep0_enable,
+ .disable = musb_g_ep0_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .queue = musb_g_ep0_queue,
+ .dequeue = musb_g_ep0_dequeue,
+ .set_halt = musb_g_ep0_halt,
+};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 000000000000..8b4be012669a
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2170 @@
+/*
+ * MUSB OTG driver host support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include "musb_core.h"
+#include "musb_host.h"
+
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ * they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ * + including ep0, with all usbtest cases 9, 10
+ * + usbtest 14 (ep0out) doesn't seem to run at all
+ * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ * configurations, but otherwise double buffering passes basic tests.
+ * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ * + about 1/15 the speed of typical EHCI implementations (PCI)
+ * + RX, all too often reqpkt seems to misbehave after tx
+ * + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - Still no traffic scheduling code to keep NAKing bulk or control
+ *   transfers from starving other requests, or to make efficient use
+ *   of the hardware for periodic transfers.  (Note that network drivers
+ * commonly post bulk reads that stay pending for a long time; these
+ * would make very visible trouble.)
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ * extra endpoint for periodic use enabling hub + keybd + mouse. That
+ * mostly works, except that with "usbnet" it's easy to trigger cases
+ * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
+ * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ * although ARP RX wins. (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it ... one remote device may easily be NAKing while others
+ * need to perform transfers in that same direction. The same thing could
+ * be done in software though, assuming dma cooperates.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic: the endpoint will be
+ * "claimed" until its software queue is no longer refilled. No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
+
+
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, unsigned int nOut,
+ u8 *buf, u32 len);
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+{
+ void __iomem *epio = ep->regs;
+ u16 csr;
+ int retries = 1000;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (retries-- < 1) {
+ ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+ return;
+ }
+ mdelay(1);
+ }
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musb_h_tx_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ if (ep->epnum) {
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+ } else {
+ txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
+ musb_writew(ep->regs, MUSB_CSR0, txcsr);
+ }
+
+}
+
+static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+}
+
+/*
+ * Start the URB at the front of an endpoint's queue; the hardware endpoint
+ * must already be claimed by the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void
+musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+{
+ u16 frame;
+ u32 len;
+ void *buf;
+ void __iomem *mbase = musb->mregs;
+ struct urb *urb = next_urb(qh);
+ struct musb_hw_ep *hw_ep = qh->hw_ep;
+ unsigned pipe = urb->pipe;
+ u8 address = usb_pipedevice(pipe);
+ int epnum = hw_ep->epnum;
+
+ /* initialize software qh state */
+ qh->offset = 0;
+ qh->segsize = 0;
+
+ /* gather right source of data */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* control transfers always start with SETUP */
+ is_in = 0;
+ hw_ep->out_qh = qh;
+ musb->ep0_stage = MUSB_EP0_START;
+ buf = urb->setup_packet;
+ len = 8;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ qh->iso_idx = 0;
+ qh->frame = 0;
+ buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+ len = urb->iso_frame_desc[0].length;
+ break;
+ default: /* bulk, interrupt */
+ buf = urb->transfer_buffer;
+ len = urb->transfer_buffer_length;
+ }
+
+ DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+ qh, urb, address, qh->epnum,
+ is_in ? "in" : "out",
+ ({char *s; switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
+ case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
+ case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
+ default: s = "-intr"; break;
+ }; s; }),
+ epnum, buf, len);
+
+ /* Configure endpoint */
+ if (is_in || hw_ep->is_shared_fifo)
+ hw_ep->in_qh = qh;
+ else
+ hw_ep->out_qh = qh;
+ musb_ep_program(musb, epnum, urb, !is_in, buf, len);
+
+ /* transmit may have more work: start it when it is time */
+ if (is_in)
+ return;
+
+ /* determine if the time is right for a periodic transfer */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ DBG(3, "check whether there's still time for periodic Tx\n");
+ qh->iso_idx = 0;
+ frame = musb_readw(mbase, MUSB_FRAME);
+ /* FIXME this doesn't implement that scheduling policy ...
+ * or handle framecounter wrapping
+ */
+ if ((urb->transfer_flags & URB_ISO_ASAP)
+ || (frame >= urb->start_frame)) {
+ /* REVISIT the SOF irq handler shouldn't duplicate
+ * this code; and we don't init urb->start_frame...
+ */
+ qh->frame = 0;
+ goto start;
+ } else {
+ qh->frame = urb->start_frame;
+ /* enable SOF interrupt so we can count down */
+ DBG(1, "SOF for %d\n", epnum);
+#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
+#endif
+ }
+ break;
+ default:
+start:
+ DBG(4, "Start TX%d %s\n", epnum,
+ hw_ep->tx_channel ? "dma" : "pio");
+
+ if (!hw_ep->tx_channel)
+ musb_h_tx_start(hw_ep);
+ else if (is_cppi_enabled() || tusb_dma_omap())
+ cppi_host_txdma_start(hw_ep);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static void
+__musb_giveback(struct musb *musb, struct urb *urb, int status)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ DBG(({ int level; switch (urb->status) {
+ case 0:
+ level = 4;
+ break;
+ /* common/boring faults */
+ case -EREMOTEIO:
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ case -EPIPE:
+ level = 3;
+ break;
+ default:
+ level = 2;
+ break;
+ }; level; }),
+ "complete %p (%d), dev%d ep%d%s, %d/%d\n",
+ urb, urb->status,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->actual_length, urb->transfer_buffer_length
+ );
+
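+ /* drop the lock around giveback; the completion handler may re-enter this driver */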
+ spin_unlock(&musb->lock);
+ usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+ spin_lock(&musb->lock);
+}
+
+/* for bulk/interrupt endpoints only */
+static inline void
+musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+{
+ struct usb_device *udev = urb->dev;
+ u16 csr;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *qh;
+
+ /* FIXME: the current Mentor DMA code seems to have
+ * problems getting toggle correct.
+ */
+
+ if (is_in || ep->is_shared_fifo)
+ qh = ep->in_qh;
+ else
+ qh = ep->out_qh;
+
+ if (!is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ usb_settoggle(udev, qh->epnum, 1,
+ (csr & MUSB_TXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ usb_settoggle(udev, qh->epnum, 0,
+ (csr & MUSB_RXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static struct musb_qh *
+musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+{
+ int is_in;
+ struct musb_hw_ep *ep = qh->hw_ep;
+ struct musb *musb = ep->musb;
+ int ready = qh->is_ready;
+
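+ /* a shared FIFO keeps its single qh on the IN side, so treat it as IN here */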
+ if (ep->is_shared_fifo)
+ is_in = 1;
+ else
+ is_in = usb_pipein(urb->pipe);
+
+ /* save toggle eagerly, for paranoia */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ musb_save_toggle(ep, is_in, urb);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (status == 0 && urb->error_count)
+ status = -EXDEV;
+ break;
+ }
+
+ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+
+ qh->is_ready = 0;
+ __musb_giveback(musb, urb, status);
+ qh->is_ready = ready;
+
+ /* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ * invalidate qh as soon as list_empty(&hep->urb_list)
+ */
+ if (list_empty(&qh->hep->urb_list)) {
+ struct list_head *head;
+
+ if (is_in)
+ ep->rx_reinit = 1;
+ else
+ ep->tx_reinit = 1;
+
+ /* clobber old pointers to this qh */
+ if (is_in || ep->is_shared_fifo)
+ ep->in_qh = NULL;
+ else
+ ep->out_qh = NULL;
+ qh->hep->hcpriv = NULL;
+
+ switch (qh->type) {
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ /* this is where periodic bandwidth should be
+ * de-allocated if it's tracked and allocated;
+ * and where we'd update the schedule tree...
+ */
+ musb->periodic[ep->epnum] = NULL;
+ kfree(qh);
+ qh = NULL;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ /* fifo policy for these lists, except that NAKing
+ * should rotate a qh to the end (for fairness).
+ */
+ head = qh->ring.prev;
+ list_del(&qh->ring);
+ kfree(qh);
+ qh = first_qh(head);
+ break;
+ }
+ }
+ return qh;
+}
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified urb and
+ * advancing to either the next urb queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, irqs are blocked
+ */
+static void
+musb_advance_schedule(struct musb *musb, struct urb *urb,
+ struct musb_hw_ep *hw_ep, int is_in)
+{
+ struct musb_qh *qh;
+
+ if (is_in || hw_ep->is_shared_fifo)
+ qh = hw_ep->in_qh;
+ else
+ qh = hw_ep->out_qh;
+
+ if (urb->status == -EINPROGRESS)
+ qh = musb_giveback(qh, urb, 0);
+ else
+ qh = musb_giveback(qh, urb, urb->status);
+
+ if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+ DBG(4, "... next ep%d %cX urb %p\n",
+ hw_ep->epnum, is_in ? 'R' : 'T',
+ next_urb(qh));
+ musb_start_urb(musb, is_in, qh);
+ }
+}
+
+static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+{
+ /* we don't want fifo to fill itself again;
+ * ignore dma (various models),
+ * leave toggle alone (may not have been saved yet)
+ */
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
+ csr &= ~(MUSB_RXCSR_H_REQPKT
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+
+ /* write 2x to allow double buffering */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+
+ /* flush writebuffer */
+ return musb_readw(hw_ep->regs, MUSB_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static bool
+musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+{
+ u16 rx_count;
+ u8 *buf;
+ u16 csr;
+ bool done = false;
+ u32 length;
+ int do_flush = 0;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ int pipe = urb->pipe;
+ void *buffer = urb->transfer_buffer;
+
+ /* musb_ep_select(mbase, epnum); */
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+ DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ urb->transfer_buffer, qh->offset,
+ urb->transfer_buffer_length);
+
+ /* unload FIFO */
+ if (usb_pipeisoc(pipe)) {
+ int status = 0;
+ struct usb_iso_packet_descriptor *d;
+
+ if (iso_err) {
+ status = -EILSEQ;
+ urb->error_count++;
+ }
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ buf = buffer + d->offset;
+ length = d->length;
+ if (rx_count > length) {
+ if (status == 0) {
+ status = -EOVERFLOW;
+ urb->error_count++;
+ }
+ DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ d->actual_length = length;
+
+ d->status = status;
+
+ /* see if we are done */
+ done = (++qh->iso_idx >= urb->number_of_packets);
+ } else {
+ /* non-isoch */
+ buf = buffer + qh->offset;
+ length = urb->transfer_buffer_length - qh->offset;
+ if (rx_count > length) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = -EOVERFLOW;
+ DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ qh->offset += length;
+
+ /* see if we are done */
+ done = (urb->actual_length == urb->transfer_buffer_length)
+ || (rx_count < qh->maxpacket)
+ || (urb->status != -EINPROGRESS);
+ if (done
+ && (urb->status == -EINPROGRESS)
+ && (urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (urb->actual_length
+ < urb->transfer_buffer_length))
+ urb->status = -EREMOTEIO;
+ }
+
+ musb_read_fifo(hw_ep, length, buf);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ if (unlikely(do_flush))
+ musb_h_flush_rxfifo(hw_ep, csr);
+ else {
+ /* REVISIT this assumes AUTOCLEAR is never set */
+ csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
+ if (!done)
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ return done;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+{
+ u16 csr;
+
+ /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
+ * That always uses tx_reinit since ep0 repurposes TX register
+ * offsets; the initial SETUP packet is also a kind of OUT.
+ */
+
+ /* if programmed for Tx, put it in RX mode */
+ if (ep->is_shared_fifo) {
+ csr = musb_readw(ep->regs, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_MODE) {
+ musb_h_tx_flush_fifo(ep);
+ musb_writew(ep->regs, MUSB_TXCSR,
+ MUSB_TXCSR_FRCDATATOG);
+ }
+ /* clear mode (and everything else) to enable Rx */
+ musb_writew(ep->regs, MUSB_TXCSR, 0);
+
+ /* scrub all previous state, clearing toggle */
+ } else {
+ csr = musb_readw(ep->regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY)
+ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+ musb_readw(ep->regs, MUSB_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
+ qh->addr_reg);
+ musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
+ qh->h_addr_reg);
+ musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
+ qh->h_port_reg);
+ } else
+ musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint, interval/NAKlimit, i/o size */
+ musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
+ musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
+ /* NOTE: bulk combining rewrites high bits of maxpacket */
+ musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+
+ ep->rx_reinit = 0;
+}
+
+
+/*
+ * Program an HDRC endpoint as per the given URB
+ * Context: irqs blocked, controller lock held
+ */
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, unsigned int is_out,
+ u8 *buf, u32 len)
+{
+ struct dma_controller *dma_controller;
+ struct dma_channel *dma_channel;
+ u8 dma_ok;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh;
+ u16 packet_sz;
+
+ if (!is_out || hw_ep->is_shared_fifo)
+ qh = hw_ep->in_qh;
+ else
+ qh = hw_ep->out_qh;
+
+ packet_sz = qh->maxpacket;
+
+ DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ "h_addr%02x h_port%02x bytes %d\n",
+ is_out ? "-->" : "<--",
+ epnum, urb, urb->dev->speed,
+ qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+ qh->h_addr_reg, qh->h_port_reg,
+ len);
+
+ musb_ep_select(mbase, epnum);
+
+ /* candidate for DMA? */
+ dma_controller = musb->dma_controller;
+ if (is_dma_capable() && epnum && dma_controller) {
+ dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
+ if (!dma_channel) {
+ dma_channel = dma_controller->channel_alloc(
+ dma_controller, hw_ep, is_out);
+ if (is_out)
+ hw_ep->tx_channel = dma_channel;
+ else
+ hw_ep->rx_channel = dma_channel;
+ }
+ } else
+ dma_channel = NULL;
+
+ /* make sure we clear DMAEnab, autoSet bits from previous run */
+
+ /* OUT/transmit/EP0 or IN/receive? */
+ if (is_out) {
+ u16 csr;
+ u16 int_txe;
+ u16 load_count;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* disable interrupt in case we flush */
+ int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+ /* general endpoint setup */
+ if (epnum) {
+ /* ASSERT: TXCSR_DMAENAB was already cleared */
+
+ /* flush all old state, set default */
+ musb_h_tx_flush_fifo(hw_ep);
+ csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_FRCDATATOG
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY
+ );
+ csr |= MUSB_TXCSR_MODE;
+
+ if (usb_gettoggle(urb->dev,
+ qh->epnum, 1))
+ csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr |= MUSB_TXCSR_CLRDATATOG;
+
+ /* twice in case of double packet buffering */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ } else {
+ /* endpoint 0: just flush */
+ musb_writew(epio, MUSB_CSR0,
+ csr | MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0,
+ csr | MUSB_CSR0_FLUSHFIFO);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
+ qh->addr_reg);
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
+ qh->h_addr_reg);
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
+ qh->h_port_reg);
+/* FIXME if !epnum, do the same for RX ... */
+ } else
+ musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint/interval/NAKlimit */
+ if (epnum) {
+ musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
+ if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP,
+ packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
+ else
+ musb_writew(epio, MUSB_TXMAXP,
+ packet_sz);
+ musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+ } else {
+ musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+ if (musb->is_multipoint)
+ musb_writeb(epio, MUSB_TYPE0,
+ qh->type_reg);
+ }
+
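+ /* how much to preload by PIO: the full hardware FIFO when bulk-splitting, else one packet */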
+ if (can_bulk_split(musb, qh->type))
+ load_count = min((u32) hw_ep->max_packet_sz_tx,
+ len);
+ else
+ load_count = min((u32) packet_sz, len);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma_channel) {
+
+ /* clear previous state */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_DMAENAB);
+ csr |= MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR,
+ csr | MUSB_TXCSR_MODE);
+
+ qh->segsize = min(len, dma_channel->max_len);
+
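+ /* mode 0 moves one packet per request (driver sets TXPKTRDY); mode 1 lets DMA chain full packets */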
+ if (qh->segsize <= packet_sz)
+ dma_channel->desired_mode = 0;
+ else
+ dma_channel->desired_mode = 1;
+
+ if (dma_channel->desired_mode == 0) {
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE);
+ csr |= (MUSB_TXCSR_DMAENAB);
+ /* against programming guide */
+ } else
+ csr |= (MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE);
+
+ musb_writew(epio, MUSB_TXCSR, csr);
+
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ dma_channel->desired_mode,
+ urb->transfer_dma,
+ qh->segsize);
+ if (dma_ok) {
+ load_count = 0;
+ } else {
+ dma_controller->channel_release(dma_channel);
+ if (is_out)
+ hw_ep->tx_channel = NULL;
+ else
+ hw_ep->rx_channel = NULL;
+ dma_channel = NULL;
+ }
+ }
+#endif
+
+ /* candidate for DMA */
+ if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+
+ /* program endpoint CSRs first, then setup DMA.
+ * assume CPPI setup succeeds.
+ * defer enabling dma.
+ */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_DMAENAB);
+ csr |= MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR,
+ csr | MUSB_TXCSR_MODE);
+
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* TX uses "rndis" mode automatically, but needs help
+ * to identify the zero-length-final-packet case.
+ */
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ (urb->transfer_flags
+ & URB_ZERO_PACKET)
+ == URB_ZERO_PACKET,
+ urb->transfer_dma,
+ qh->segsize);
+ if (dma_ok) {
+ load_count = 0;
+ } else {
+ dma_controller->channel_release(dma_channel);
+ hw_ep->tx_channel = NULL;
+ dma_channel = NULL;
+
+ /* REVISIT there's an error path here that
+ * needs handling: can't do dma, but
+ * there's no pio buffer address...
+ */
+ }
+ }
+
+ if (load_count) {
+ /* ASSERT: TXCSR_DMAENAB was already cleared */
+
+ /* PIO to load FIFO */
+ qh->segsize = load_count;
+ musb_write_fifo(hw_ep, load_count, buf);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_AUTOSET);
+ /* write CSR */
+ csr |= MUSB_TXCSR_MODE;
+
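+ /* ep0 is special: CSR0 gets written later, from musb_h_tx_start() */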
+ if (epnum)
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+ /* IN/receive */
+ } else {
+ u16 csr;
+
+ if (hw_ep->rx_reinit) {
+ musb_rx_reinit(musb, qh, hw_ep);
+
+ /* init new state: toggle and NYET, maybe DMA later */
+ if (usb_gettoggle(urb->dev, qh->epnum, 0))
+ csr = MUSB_RXCSR_H_WR_DATATOGGLE
+ | MUSB_RXCSR_H_DATATOGGLE;
+ else
+ csr = 0;
+ if (qh->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ } else {
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ if (csr & (MUSB_RXCSR_RXPKTRDY
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_REQPKT))
+ ERR("broken !rx_reinit, ep%d csr %04x\n",
+ hw_ep->epnum, csr);
+
+ /* scrub any stale state, leaving toggle alone */
+ csr &= MUSB_RXCSR_DISNYET;
+ }
+
+ /* kick things off */
+
+ if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+ /* candidate for DMA */
+ if (dma_channel) {
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* AUTOREQ is in a DMA register */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs,
+ MUSB_RXCSR);
+
+ /* unless caller treats short rx transfers as
+ * errors, we dare not queue multiple transfers.
+ */
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ !(urb->transfer_flags
+ & URB_SHORT_NOT_OK),
+ urb->transfer_dma,
+ qh->segsize);
+ if (!dma_ok) {
+ dma_controller->channel_release(
+ dma_channel);
+ hw_ep->rx_channel = NULL;
+ dma_channel = NULL;
+ } else
+ csr |= MUSB_RXCSR_DMAENAB;
+ }
+ }
+
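+ /* request the first IN packet for this transfer */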
+ csr |= MUSB_RXCSR_H_REQPKT;
+ DBG(7, "RXCSR%d := %04x\n", epnum, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ }
+}
+
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * Return true until it's time to start the status stage.
+ */
+static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+{
+ bool more = false;
+ u8 *fifo_dest = NULL;
+ u16 fifo_count = 0;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ struct musb_qh *qh = hw_ep->in_qh;
+ struct usb_ctrlrequest *request;
+
+ switch (musb->ep0_stage) {
+ case MUSB_EP0_IN:
+ fifo_dest = urb->transfer_buffer + urb->actual_length;
+ fifo_count = min(len, ((u16) (urb->transfer_buffer_length
+ - urb->actual_length)));
+ if (fifo_count < len)
+ urb->status = -EOVERFLOW;
+
+ musb_read_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ if (len < qh->maxpacket) {
+ /* always terminate on short read; it's
+ * rarely reported as an error.
+ */
+ } else if (urb->actual_length <
+ urb->transfer_buffer_length)
+ more = true;
+ break;
+ case MUSB_EP0_START:
+ request = (struct usb_ctrlrequest *) urb->setup_packet;
+
+ if (!request->wLength) {
+ DBG(4, "start no-DATA\n");
+ break;
+ } else if (request->bRequestType & USB_DIR_IN) {
+ DBG(4, "start IN-DATA\n");
+ musb->ep0_stage = MUSB_EP0_IN;
+ more = true;
+ break;
+ } else {
+ DBG(4, "start OUT-DATA\n");
+ musb->ep0_stage = MUSB_EP0_OUT;
+ more = true;
+ }
+ /* FALLTHROUGH */
+ case MUSB_EP0_OUT:
+ fifo_count = min(qh->maxpacket, ((u16)
+ (urb->transfer_buffer_length
+ - urb->actual_length)));
+
+ if (fifo_count) {
+ fifo_dest = (u8 *) (urb->transfer_buffer
+ + urb->actual_length);
+ DBG(3, "Sending %d bytes to %p\n",
+ fifo_count, fifo_dest);
+ musb_write_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ more = true;
+ }
+ break;
+ default:
+ ERR("bogus ep0 stage %d\n", musb->ep0_stage);
+ break;
+ }
+
+ return more;
+}
+
+/*
+ * Handle default endpoint interrupt as host. Only called in IRQ time
+ * from the LinuxIsr() interrupt service routine.
+ *
+ * called with controller irqlocked
+ */
+irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+ struct urb *urb;
+ u16 csr, len;
+ int status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ bool complete = false;
+ irqreturn_t retval = IRQ_NONE;
+
+ /* ep0 only has one queue, "in" */
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, 0);
+ csr = musb_readw(epio, MUSB_CSR0);
+ len = (csr & MUSB_CSR0_RXPKTRDY)
+ ? musb_readb(epio, MUSB_COUNT0)
+ : 0;
+
+ DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ csr, qh, len, urb, musb->ep0_stage);
+
+ /* if we just did status stage, we are done */
+ if (MUSB_EP0_STATUS == musb->ep0_stage) {
+ retval = IRQ_HANDLED;
+ complete = true;
+ }
+
+ /* prepare status */
+ if (csr & MUSB_CSR0_H_RXSTALL) {
+ DBG(6, "STALLING ENDPOINT\n");
+ status = -EPIPE;
+
+ } else if (csr & MUSB_CSR0_H_ERROR) {
+ DBG(2, "no response, csr0 %04x\n", csr);
+ status = -EPROTO;
+
+ } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
+ DBG(2, "control NAK timeout\n");
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * control transfer, if another one is queued, so that
+ * ep0 is more likely to stay busy.
+ *
+ * if (qh->ring.next != &musb->control), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_writew(epio, MUSB_CSR0, 0);
+ retval = IRQ_HANDLED;
+ }
+
+ if (status) {
+ DBG(6, "aborting\n");
+ retval = IRQ_HANDLED;
+ if (urb)
+ urb->status = status;
+ complete = true;
+
+ /* use the proper sequence to abort the transfer */
+ if (csr & MUSB_CSR0_H_REQPKT) {
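+ /* IN phase: stop requesting packets, then clear the NAK timeout */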
+ csr &= ~MUSB_CSR0_H_REQPKT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ } else {
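+ /* OUT phase: flush the FIFO, then clear the NAK timeout */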
+ csr |= MUSB_CSR0_FLUSHFIFO;
+ musb_writew(epio, MUSB_CSR0, csr);
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ }
+
+ musb_writeb(epio, MUSB_NAKLIMIT0, 0);
+
+ /* clear it */
+ musb_writew(epio, MUSB_CSR0, 0);
+ }
+
+ if (unlikely(!urb)) {
+ /* stop endpoint since we have no place for its data; this
+ * SHOULD NEVER HAPPEN! */
+ ERR("no URB for end 0\n");
+
+ musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0, 0);
+
+ goto done;
+ }
+
+ if (!complete) {
+ /* call common logic and prepare response */
+ if (musb_h_ep0_continue(musb, len, urb)) {
+ /* more packets required */
+ csr = (MUSB_EP0_IN == musb->ep0_stage)
+ ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
+ } else {
+ /* data transfer complete; perform status phase */
+ if (usb_pipeout(urb->pipe)
+ || !urb->transfer_buffer_length)
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_H_REQPKT;
+ else
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_TXPKTRDY;
+
+ /* flag status stage */
+ musb->ep0_stage = MUSB_EP0_STATUS;
+
+ DBG(5, "ep0 STATUS, csr %04x\n", csr);
+
+ }
+ musb_writew(epio, MUSB_CSR0, csr);
+ retval = IRQ_HANDLED;
+ } else
+ musb->ep0_stage = MUSB_EP0_IDLE;
+
+ /* call completion handler if done */
+ if (complete)
+ musb_advance_schedule(musb, urb, hw_ep, 1);
+done:
+ return retval;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side TX (OUT) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, Program Endpoint
+ - ... which starts DMA to fifo in mode 1 or 0
+
+ DMA Isr (transfer complete) -> TxAvail()
+ - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
+ only in musb_cleanup_urb)
+ - TxPktRdy has to be set in mode 0 or for
+ short packets in mode 1.
+*/
+
+#endif
+
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musb_host_tx(struct musb *musb, u8 epnum)
+{
+ int pipe;
+ bool done = false;
+ u16 tx_csr;
+ size_t wLength = 0;
+ u8 *buf = NULL;
+ struct urb *urb;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->out_qh;
+ u32 status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct dma_channel *dma;
+
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, epnum);
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* with CPPI, DMA sometimes triggers "extra" irqs */
+ if (!urb) {
+ DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ goto finish;
+ }
+
+ pipe = urb->pipe;
+ dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
+ DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+ dma ? ", dma" : "");
+
+ /* check for errors */
+ if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
+ /* dma was disabled, fifo flushed */
+ DBG(3, "TX end %d stall\n", epnum);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
+ /* (NON-ISO) dma was disabled, fifo flushed */
+ DBG(3, "TX 3strikes on ep=%d\n", epnum);
+
+ status = -ETIMEDOUT;
+
+ } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
+ DBG(6, "TX end=%d device not responding\n", epnum);
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS
+ | MUSB_TXCSR_TXPKTRDY);
+ goto finish;
+ }
+
+ if (status) {
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ }
+
+ /* do the proper sequence to abort the transfer in the
+ * usb core; the dma engine should already be stopped.
+ */
+ musb_h_tx_flush_fifo(hw_ep);
+ tx_csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ );
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ musb_writeb(epio, MUSB_TXINTERVAL, 0);
+
+ done = true;
+ }
+
+ /* second cppi case */
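+ /* DMA is still running; nothing more to do until it completes */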
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ goto finish;
+
+ }
+
+ /* REVISIT this looks wrong... */
+ if (!status || dma || usb_pipeisoc(pipe)) {
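+ /* account for the data just transferred, and locate the next chunk if any */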
+ if (dma)
+ wLength = dma->actual_len;
+ else
+ wLength = qh->segsize;
+ qh->offset += wLength;
+
+ if (usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length = qh->segsize;
+ if (++qh->iso_idx >= urb->number_of_packets) {
+ done = true;
+ } else {
+ d++;
+ buf = urb->transfer_buffer + d->offset;
+ wLength = d->length;
+ }
+ } else if (dma) {
+ done = true;
+ } else {
+ /* see if we need to send more data, or ZLP */
+ if (qh->segsize < qh->maxpacket)
+ done = true;
+ else if (qh->offset == urb->transfer_buffer_length
+ && !(urb->transfer_flags
+ & URB_ZERO_PACKET))
+ done = true;
+ if (!done) {
+ buf = urb->transfer_buffer
+ + qh->offset;
+ wLength = urb->transfer_buffer_length
+ - qh->offset;
+ }
+ }
+ }
+
+ /* urb->status != -EINPROGRESS means request has been faulted,
+ * so we must abort this transfer after cleanup
+ */
+ if (urb->status != -EINPROGRESS) {
+ done = true;
+ if (status == 0)
+ status = urb->status;
+ }
+
+ if (done) {
+ /* set status */
+ urb->status = status;
+ urb->actual_length = qh->offset;
+ musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+
+ } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
+ /* WARN_ON(!buf); */
+
+ /* REVISIT: some docs say that when hw_ep->tx_double_buffered,
+ * (and presumably, fifo is not half-full) we should write TWO
+ * packets before updating TXCSR ... other docs disagree ...
+ */
+ /* PIO: start next packet in this URB */
+ wLength = min(qh->maxpacket, (u16) wLength);
+ musb_write_fifo(hw_ep, wLength, buf);
+ qh->segsize = wLength;
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
+ } else
+ DBG(1, "not complete, but dma enabled?\n");
+
+finish:
+ return;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side RX (IN) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, ProgramEndpoint
+ - first IN token is sent out (by setting ReqPkt)
+ LinuxIsr -> RxReady()
+ /\ => first packet is received
+ | - Set in mode 0 (DmaEnab, ~ReqPkt)
+ | -> DMA Isr (transfer complete) -> RxReady()
+ | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
+ | - if urb not complete, send next IN token (ReqPkt)
+ | | else complete urb.
+ | |
+ ---------------------------
+ *
+ * Nuances of mode 1:
+ * For short packets, no ack (+RxPktRdy) is sent automatically
+ * (even if AutoClear is ON)
+ * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
+ * automatically => major problem, as collecting the next packet becomes
+ * difficult. Hence mode 1 is not used.
+ *
+ * REVISIT
+ * All we care about at this driver level is that
+ * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
+ * (b) termination conditions are: short RX, or buffer full;
+ * (c) fault modes include
+ * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
+ * (and that endpoint's dma queue stops immediately)
+ * - overflow (full, PLUS more bytes in the terminal packet)
+ *
+ * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
+ * thus be a great candidate for using mode 1 ... for all but the
+ * last packet of one URB's transfer.
+ */
+
+#endif
+
+/*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+ */
+void musb_host_rx(struct musb *musb, u8 epnum)
+{
+ struct urb *urb;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ size_t xfer_len;
+ void __iomem *mbase = musb->mregs;
+ int pipe;
+ u16 rx_csr, val;
+ bool iso_err = false;
+ bool done = false;
+ u32 status;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+
+ urb = next_urb(qh);
+ dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
+ status = 0;
+ xfer_len = 0;
+
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ val = rx_csr;
+
+ if (unlikely(!urb)) {
+ /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+ * usbtest #11 (unlinks) triggers it regularly, sometimes
+ * with fifo full. (Only with DMA??)
+ */
+ DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+ musb_readw(epio, MUSB_RXCOUNT));
+ musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+ return;
+ }
+
+ pipe = urb->pipe;
+
+ DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
+ epnum, rx_csr, urb->actual_length,
+ dma ? dma->actual_len : 0);
+
+ /* check for errors, concurrent stall & unlink is not really
+ * handled yet! */
+ if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
+ DBG(3, "RX end %d STALL\n", epnum);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
+ DBG(3, "end %d RX proto error\n", epnum);
+
+ status = -EPROTO;
+ musb_writeb(epio, MUSB_RXINTERVAL, 0);
+
+ } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
+
+ if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+ /* NOTE this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) rx urb
+ * that could use this fifo. (dma complicates it...)
+ *
+ * if (bulk && qh->ring.next != &musb->in_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ DBG(6, "RX end %d NAK timeout\n", epnum);
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS
+ | MUSB_RXCSR_H_REQPKT);
+
+ goto finish;
+ } else {
+ DBG(4, "RX end %d ISO data error\n", epnum);
+ /* packet error reported later */
+ iso_err = true;
+ }
+ }
+
+ /* faults abort the transfer */
+ if (status) {
+ /* clean up dma and collect transfer count */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ xfer_len = dma->actual_len;
+ }
+ musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+ musb_writeb(epio, MUSB_RXINTERVAL, 0);
+ done = true;
+ goto finish;
+ }
+
+ if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
+ /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
+ ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
+ goto finish;
+ }
+
+ /* thorough shutdown for now ... given more precise fault handling
+ * and better queueing support, we might keep a DMA pipeline going
+ * while processing this irq for earlier completions.
+ */
+
+ /* FIXME this is _way_ too much in-line logic for Mentor DMA */
+
+#ifndef CONFIG_USB_INVENTRA_DMA
+ if (rx_csr & MUSB_RXCSR_H_REQPKT) {
+ /* REVISIT this happened for a while on some short reads...
+ * the cleanup still needs investigation... looks bad...
+ * and also duplicates dma cleanup code above ... plus,
+ * shouldn't this be the "half full" double buffer case?
+ */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ xfer_len = dma->actual_len;
+ done = true;
+ }
+
+ DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+ xfer_len, dma ? ", dma" : "");
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | rx_csr);
+ }
+#endif
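+ /* a DMA request completed: collect its byte count and stop DMA on this endpoint */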
+ if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
+ xfer_len = dma->actual_len;
+
+ val &= ~(MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_RXPKTRDY);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ /* done if urb buffer is full or short packet is recd */
+ done = (urb->actual_length + xfer_len >=
+ urb->transfer_buffer_length
+ || dma->actual_len < qh->maxpacket);
+
+ /* send IN token for next packet, without AUTOREQ */
+ if (!done) {
+ val |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | val);
+ }
+
+ DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+ done ? "off" : "reset",
+ musb_readw(epio, MUSB_RXCSR),
+ musb_readw(epio, MUSB_RXCOUNT));
+#else
+ done = true;
+#endif
+ } else if (urb->status == -EINPROGRESS) {
+ /* if no errors, be sure a packet is ready for unloading */
+ if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
+ status = -EPROTO;
+ ERR("Rx interrupt with no errors or packet!\n");
+
+ /* FIXME this is another "SHOULD NEVER HAPPEN" */
+
+/* SCRUB (RX) */
+ /* do the proper sequence to abort the transfer */
+ musb_ep_select(mbase, epnum);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, val);
+ goto finish;
+ }
+
+ /* we are expecting IN packets */
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma) {
+ struct dma_controller *c;
+ u16 rx_count;
+ int ret;
+
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+
+ DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
+ epnum, rx_count,
+ urb->transfer_dma
+ + urb->actual_length,
+ qh->offset,
+ urb->transfer_buffer_length);
+
+ c = musb->dma_controller;
+
+ dma->desired_mode = 0;
+#ifdef USE_MODE1
+ /* because of the issue below, mode 1 will
+ * only rarely behave with correct semantics.
+ */
+ if ((urb->transfer_flags &
+ URB_SHORT_NOT_OK)
+ && (urb->transfer_buffer_length -
+ urb->actual_length)
+ > qh->maxpacket)
+ dma->desired_mode = 1;
+#endif
+
+/* Disadvantage of using mode 1:
+ * It's basically usable only for mass storage class; essentially all
+ * other protocols also terminate transfers on short packets.
+ *
+ * Details:
+ * An extra IN token is sent at the end of the transfer (due to AUTOREQ)
+ * If you try to use mode 1 for (transfer_buffer_length - 512), and try
+ * to use the extra IN token to grab the last packet using mode 0, then
+ * the problem is that you cannot be sure when the device will send the
+ * last packet and RxPktRdy set. Sometimes the packet is recd too soon
+ * such that it gets lost when RxCSR is re-set at the end of the mode 1
+ * transfer, while sometimes it is recd just a little late so that if you
+ * try to configure for mode 0 soon after the mode 1 transfer is
+ * completed, you will find rxcount 0. Okay, so you might think why not
+ * wait for an interrupt when the pkt is recd. Well, you won't get any!
+ */
+
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+
+ if (dma->desired_mode == 0)
+ val &= ~MUSB_RXCSR_H_AUTOREQ;
+ else
+ val |= MUSB_RXCSR_H_AUTOREQ;
+ val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | val);
+
+ /* REVISIT if when actual_length != 0,
+ * transfer_buffer_length needs to be
+ * adjusted first...
+ */
+ ret = c->channel_program(
+ dma, qh->maxpacket,
+ dma->desired_mode,
+ urb->transfer_dma
+ + urb->actual_length,
+ (dma->desired_mode == 0)
+ ? rx_count
+ : urb->transfer_buffer_length);
+
+ if (!ret) {
+ c->channel_release(dma);
+ hw_ep->rx_channel = NULL;
+ dma = NULL;
+ /* REVISIT reset CSR */
+ }
+ }
+#endif /* Mentor DMA */
+
+ if (!dma) {
+ done = musb_host_packet_rx(musb, urb,
+ epnum, iso_err);
+ DBG(6, "read %spacket\n", done ? "last " : "");
+ }
+ }
+
+ if (dma && usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+ int iso_stat = status;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length += xfer_len;
+ if (iso_err) {
+ iso_stat = -EILSEQ;
+ urb->error_count++;
+ }
+ d->status = iso_stat;
+ }
+
+finish:
+ urb->actual_length += xfer_len;
+ qh->offset += xfer_len;
+ if (done) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = status;
+ musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
+ }
+}
+
+/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
+ * the software schedule associates multiple such nodes with a given
+ * host side hardware endpoint + direction; scheduling may activate
+ * that hardware endpoint.
+ */
+static int musb_schedule(
+ struct musb *musb,
+ struct musb_qh *qh,
+ int is_in)
+{
+ int idle;
+ int best_diff;
+ int best_end, epnum;
+ struct musb_hw_ep *hw_ep = NULL;
+ struct list_head *head = NULL;
+
+ /* use fixed hardware for control and bulk */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ head = &musb->control;
+ hw_ep = musb->control_ep;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ hw_ep = musb->bulk_ep;
+ if (is_in)
+ head = &musb->in_bulk;
+ else
+ head = &musb->out_bulk;
+ break;
+ }
+ if (head) {
+ idle = list_empty(head);
+ list_add_tail(&qh->ring, head);
+ goto success;
+ }
+
+ /* else, periodic transfers get muxed to other endpoints */
+
+ /* FIXME this doesn't consider direction, so it can only
+ * work for one half of the endpoint hardware, and assumes
+ * the previous cases handled all non-shared endpoints...
+ */
+
+ /* we know this qh hasn't been scheduled, so all we need to do
+ * is choose which hardware endpoint to put it on ...
+ *
+ * REVISIT what we really want here is a regular schedule tree
+ * like e.g. OHCI uses, but for now musb->periodic is just an
+ * array of the _single_ logical endpoint associated with a
+ * given physical one (identity mapping logical->physical).
+ *
+ * that simplistic approach makes TT scheduling a lot simpler;
+ * there is none, and thus none of its complexity...
+ */
+ best_diff = 4096;
+ best_end = -1;
+
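+ /* best fit: the free endpoint whose FIFO exceeds this maxpacket by the smallest margin */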
+ for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+ int diff;
+
+ if (musb->periodic[epnum])
+ continue;
+ hw_ep = &musb->endpoints[epnum];
+ if (hw_ep == musb->bulk_ep)
+ continue;
+
+ if (is_in)
+ diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+ else
+ diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+
+ if (diff > 0 && best_diff > diff) {
+ best_diff = diff;
+ best_end = epnum;
+ }
+ }
+ if (best_end < 0)
+ return -ENOSPC;
+
+ idle = 1;
+ hw_ep = musb->endpoints + best_end;
+ musb->periodic[best_end] = qh;
+ DBG(4, "qh %p periodic slot %d\n", qh, best_end);
+success:
+ qh->hw_ep = hw_ep;
+ qh->hep->hcpriv = qh;
+ if (idle)
+ musb_start_urb(musb, is_in, qh);
+ return 0;
+}
+
+static int musb_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ struct usb_host_endpoint *hep = urb->ep;
+ struct musb_qh *qh = hep->hcpriv;
+ struct usb_endpoint_descriptor *epd = &hep->desc;
+ int ret;
+ unsigned type_reg;
+ unsigned interval;
+
+ /* host role must be active */
+ if (!is_host_active(musb) || !musb->is_active)
+ return -ENODEV;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ if (ret)
+ return ret;
+
+ /* DMA mapping was already done, if needed, and this urb is on
+ * hep->urb_list ... so there's little to do unless hep wasn't
+ * yet scheduled onto a live qh.
+ *
+ * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+ * disabled, testing for empty qh->ring and avoiding qh setup costs
+ * except for the first urb queued after a config change.
+ */
+ if (qh) {
+ urb->hcpriv = qh;
+ return 0;
+ }
+
+ /* Allocate and initialize qh, minimizing the work done each time
+ * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
+ *
+ * REVISIT consider a dedicated qh kmem_cache, so it's harder
+ * for bugs in other kernel code to break this driver...
+ */
+ qh = kzalloc(sizeof *qh, mem_flags);
+ if (!qh) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ return -ENOMEM;
+ }
+
+ qh->hep = hep;
+ qh->dev = urb->dev;
+ INIT_LIST_HEAD(&qh->ring);
+ qh->is_ready = 1;
+
+ qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+
+ /* no high bandwidth support yet */
+ if (qh->maxpacket & ~0x7ff) {
+ ret = -EMSGSIZE;
+ goto done;
+ }
+
+ qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+ qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+
+ /* precompute rxtype/txtype/type0 register */
+ type_reg = (qh->type << 4) | qh->epnum;
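+ /* the two top bits of the type register encode the device speed */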
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ type_reg |= 0xc0;
+ break;
+ case USB_SPEED_FULL:
+ type_reg |= 0x80;
+ break;
+ default:
+ type_reg |= 0x40;
+ }
+ qh->type_reg = type_reg;
+
+ /* precompute rxinterval/txinterval register */
+ interval = min((u8)16, epd->bInterval); /* log encoding */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ /* fullspeed uses linear encoding */
+ if (USB_SPEED_FULL == urb->dev->speed) {
+ interval = epd->bInterval;
+ if (!interval)
+ interval = 1;
+ }
+ /* FALLTHROUGH */
+ case USB_ENDPOINT_XFER_ISOC:
+ /* iso always uses log encoding */
+ break;
+ default:
+ /* REVISIT we actually want to use NAK limits, hinting to the
+ * transfer scheduling logic to try some other qh, e.g. try
+ * for 2 msec first:
+ *
+ * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+ *
+ * The downside of disabling this is that transfer scheduling
+ * gets VERY unfair for nonperiodic transfers; a misbehaving
+ * peripheral could make that hurt. Or for reads, one that's
+ * perfectly normal: network and other drivers keep reads
+ * posted at all times, having one pending for a week should
+ * be perfectly safe.
+ *
+ * The upside of disabling it is avoiding transfer scheduling
+ * code that would put this aside for a while.
+ */
+ interval = 0;
+ }
+ qh->intv_reg = interval;
+
+ /* precompute addressing for external hub/tt ports */
+ if (musb->is_multipoint) {
+ struct usb_device *parent = urb->dev->parent;
+
+ if (parent != hcd->self.root_hub) {
+ qh->h_addr_reg = (u8) parent->devnum;
+
+ /* set up tt info if needed */
+ if (urb->dev->tt) {
+ qh->h_port_reg = (u8) urb->dev->ttport;
+ qh->h_addr_reg |= 0x80;
+ }
+ }
+ }
+
+ /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+ * until we get real dma queues (with an entry for each urb/buffer),
+ * we only have work to do in the former case.
+ */
+ spin_lock_irqsave(&musb->lock, flags);
+ if (hep->hcpriv) {
+ /* some concurrent activity submitted another urb to hep...
+ * odd, rare, error prone, but legal.
+ */
+ kfree(qh);
+ ret = 0;
+ } else
+ ret = musb_schedule(musb, qh,
+ epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
+
+ if (ret == 0) {
+ urb->hcpriv = qh;
+ /* FIXME set urb->start_frame for iso/intr, it's tested in
+ * musb_start_urb(), but otherwise only konicawc cares ...
+ */
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+done:
+ if (ret != 0) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ kfree(qh);
+ }
+ return ret;
+}
+
+
+/*
+ * abort a transfer that's at the head of a hardware queue.
+ * called with controller locked, irqs blocked
+ * that hardware queue advances to the next transfer, unless prevented
+ */
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+{
+ struct musb_hw_ep *ep = qh->hw_ep;
+ void __iomem *epio = ep->regs;
+ unsigned hw_end = ep->epnum;
+ void __iomem *regs = ep->musb->mregs;
+ u16 csr;
+ int status = 0;
+
+ musb_ep_select(regs, hw_end);
+
+ if (is_dma_capable()) {
+ struct dma_channel *dma;
+
+ dma = is_in ? ep->rx_channel : ep->tx_channel;
+ if (dma) {
+ status = ep->musb->dma_controller->channel_abort(dma);
+ DBG(status ? 1 : 3,
+ "abort %cX%d DMA for urb %p --> %d\n",
+ is_in ? 'R' : 'T', ep->epnum,
+ urb, status);
+ urb->actual_length += dma->actual_len;
+ }
+ }
+
+ /* turn off DMA requests, discard state, stop polling ... */
+ if (is_in) {
+ /* giveback saves bulk toggle */
+ csr = musb_h_flush_rxfifo(ep, 0);
+
+ /* REVISIT we still get an irq; should likely clear the
+ * endpoint's irq status here to avoid bogus irqs.
+ * clearing that status is platform-specific...
+ */
+ } else {
+ musb_h_tx_flush_fifo(ep);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* flush cpu writebuffer */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ }
+ if (status == 0)
+ musb_advance_schedule(ep->musb, urb, ep, is_in);
+ return status;
+}
+
+static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ struct musb_qh *qh;
+ struct list_head *sched;
+ unsigned long flags;
+ int ret;
+
+ DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto done;
+
+ qh = urb->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* Any URB not actively programmed into endpoint hardware can be
+ * immediately given back. Such an URB must be at the head of its
+ * endpoint queue, unless someday we get real DMA queues. And even
+ * then, it might not be known to the hardware...
+ *
+ * Otherwise abort current transfer, pending dma, etc.; urb->status
+ * has already been updated. This is a synchronous abort; it'd be
+ * OK to hold off until after some IRQ, though.
+ */
+ if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
+ ret = -EINPROGRESS;
+ else {
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ sched = &musb->control;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_pipein(urb->pipe))
+ sched = &musb->in_bulk;
+ else
+ sched = &musb->out_bulk;
+ break;
+ default:
+ /* REVISIT when we get a schedule tree, periodic
+ * transfers won't always be at the head of a
+ * singleton queue...
+ */
+ sched = NULL;
+ break;
+ }
+ }
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+ if (ret < 0 || (sched && qh != first_qh(sched))) {
+ int ready = qh->is_ready;
+
+ ret = 0;
+ qh->is_ready = 0;
+ __musb_giveback(musb, urb, 0);
+ qh->is_ready = ready;
+ } else
+ ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return ret;
+}
+
+/* disable an endpoint */
+static void
+musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+ u8 epnum = hep->desc.bEndpointAddress;
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ u8 is_in = epnum & USB_DIR_IN;
+ struct musb_qh *qh = hep->hcpriv;
+ struct urb *urb, *tmp;
+ struct list_head *sched;
+
+ if (!qh)
+ return;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ sched = &musb->control;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (is_in)
+ sched = &musb->in_bulk;
+ else
+ sched = &musb->out_bulk;
+ break;
+ default:
+ /* REVISIT when we get a schedule tree, periodic transfers
+ * won't always be at the head of a singleton queue...
+ */
+ sched = NULL;
+ break;
+ }
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+
+ /* kick first urb off the hardware, if needed */
+ qh->is_ready = 0;
+ if (!sched || qh == first_qh(sched)) {
+ urb = next_urb(qh);
+
+ /* make software (then hardware) stop ASAP */
+ if (!urb->unlinked)
+ urb->status = -ESHUTDOWN;
+
+ /* cleanup */
+ musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+ } else
+ urb = NULL;
+
+ /* then just nuke all the others */
+ list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
+ musb_giveback(qh, urb, -ESHUTDOWN);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int musb_h_get_frame_number(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ return musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_h_start(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ /* NOTE: musb_start() is called when the hub driver turns
+ * on port power, or when (OTG) peripheral starts.
+ */
+ hcd->state = HC_STATE_RUNNING;
+ musb->port1_status = 0;
+ return 0;
+}
+
+static void musb_h_stop(struct usb_hcd *hcd)
+{
+ musb_stop(hcd_to_musb(hcd));
+ hcd->state = HC_STATE_HALT;
+}
+
+static int musb_bus_suspend(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+ return 0;
+
+ if (is_host_active(musb) && musb->is_active) {
+ WARNING("trying to suspend as %s is_active=%i\n",
+ otg_state_string(musb), musb->is_active);
+ return -EBUSY;
+ } else
+ return 0;
+}
+
+static int musb_bus_resume(struct usb_hcd *hcd)
+{
+ /* resuming child port does the work */
+ return 0;
+}
+
+const struct hc_driver musb_hc_driver = {
+ .description = "musb-hcd",
+ .product_desc = "MUSB HDRC host driver",
+ .hcd_priv_size = sizeof(struct musb),
+ .flags = HCD_USB2 | HCD_MEMORY,
+
+ /* not using irq handler or reset hooks from usbcore, since
+ * those must be shared with peripheral code for OTG configs
+ */
+
+ .start = musb_h_start,
+ .stop = musb_h_stop,
+
+ .get_frame_number = musb_h_get_frame_number,
+
+ .urb_enqueue = musb_urb_enqueue,
+ .urb_dequeue = musb_urb_dequeue,
+ .endpoint_disable = musb_h_disable,
+
+ .hub_status_data = musb_hub_status_data,
+ .hub_control = musb_hub_control,
+ .bus_suspend = musb_bus_suspend,
+ .bus_resume = musb_bus_resume,
+ /* .start_port_reset = NULL, */
+ /* .hub_irq_enable = NULL, */
+};
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
new file mode 100644
index 000000000000..77bcdb9d5b32
--- /dev/null
+++ b/drivers/usb/musb/musb_host.h
@@ -0,0 +1,110 @@
+/*
+ * MUSB OTG driver host defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MUSB_HOST_H
+#define _MUSB_HOST_H
+
+static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
+{
+ return container_of((void *) musb, struct usb_hcd, hcd_priv);
+}
+
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return (struct musb *) (hcd->hcd_priv);
+}
+
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
+struct musb_qh {
+ struct usb_host_endpoint *hep; /* usbcore info */
+ struct usb_device *dev;
+ struct musb_hw_ep *hw_ep; /* current binding */
+
+ struct list_head ring; /* of musb_qh */
+ /* struct musb_qh *next; */ /* for periodic tree */
+
+ unsigned offset; /* in urb->transfer_buffer */
+ unsigned segsize; /* current xfer fragment */
+
+ u8 type_reg; /* {rx,tx} type register */
+ u8 intv_reg; /* {rx,tx} interval register */
+ u8 addr_reg; /* device address register */
+ u8 h_addr_reg; /* hub address register */
+ u8 h_port_reg; /* hub port register */
+
+ u8 is_ready; /* safe to modify hw_ep */
+ u8 type; /* XFERTYPE_* */
+ u8 epnum;
+ u16 maxpacket;
+ u16 frame; /* for periodic schedule */
+ unsigned iso_idx; /* in urb->iso_frame_desc[] */
+};
+
+/* map from control or bulk queue head to the first qh on that ring */
+static inline struct musb_qh *first_qh(struct list_head *q)
+{
+ if (list_empty(q))
+ return NULL;
+ return list_entry(q->next, struct musb_qh, ring);
+}
+
+
+extern void musb_root_disconnect(struct musb *musb);
+
+struct usb_hcd;
+
+extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int musb_hub_control(struct usb_hcd *hcd,
+ u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+
+extern const struct hc_driver musb_hc_driver;
+
+static inline struct urb *next_urb(struct musb_qh *qh)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ struct list_head *queue;
+
+ if (!qh)
+ return NULL;
+ queue = &qh->hep->urb_list;
+ if (list_empty(queue))
+ return NULL;
+ return list_entry(queue->next, struct urb, urb_list);
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
new file mode 100644
index 000000000000..6bbedae83af8
--- /dev/null
+++ b/drivers/usb/musb/musb_io.h
@@ -0,0 +1,115 @@
+/*
+ * MUSB OTG driver register I/O
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
+#define __MUSB_LINUX_PLATFORM_ARCH_H__
+
+#include <linux/io.h>
+
+#ifndef CONFIG_ARM
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+ { insl((unsigned long)addr, buf, len); }
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+ { insw((unsigned long)addr, buf, len); }
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+ { insb((unsigned long)addr, buf, len); }
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+ { outsl((unsigned long)addr, buf, len); }
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+ { outsw((unsigned long)addr, buf, len); }
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+ { outsb((unsigned long)addr, buf, len); }
+
+#endif
+
+/* NOTE: these offsets are all in bytes */
+
+static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+ { return __raw_readw(addr + offset); }
+
+static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+ { return __raw_readl(addr + offset); }
+
+
+static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+ { __raw_writew(data, addr + offset); }
+
+static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+ { __raw_writel(data, addr + offset); }
+
+
+#ifdef CONFIG_USB_TUSB6010
+
+/*
+ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+ */
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+{
+ u16 tmp;
+ u8 val;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ val = (tmp >> 8);
+ else
+ val = tmp & 0xff;
+
+ return val;
+}
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ u16 tmp;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ tmp = (data << 8) | (tmp & 0xff);
+ else
+ tmp = (tmp & 0xff00) | data;
+
+ __raw_writew(tmp, addr + (offset & ~1));
+}
+
+#else
+
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+ { return __raw_readb(addr + offset); }
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+ { __raw_writeb(data, addr + offset); }
+
+#endif /* CONFIG_USB_TUSB6010 */
+
+#endif
diff --git a/drivers/usb/musb/musb_procfs.c b/drivers/usb/musb/musb_procfs.c
new file mode 100644
index 000000000000..55e6b78bdccc
--- /dev/null
+++ b/drivers/usb/musb/musb_procfs.c
@@ -0,0 +1,830 @@
+/*
+ * MUSB OTG driver debug support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h> /* FIXME remove procfs writes */
+#include <asm/arch/hardware.h>
+
+#include "musb_core.h"
+
+#include "davinci.h"
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+static int dump_qh(struct musb_qh *qh, char *buf, unsigned max)
+{
+ int count;
+ int tmp;
+ struct usb_host_endpoint *hep = qh->hep;
+ struct urb *urb;
+
+ count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n",
+ qh, qh->dev->devnum, qh->epnum,
+ ({ char *s; switch (qh->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ s = "-bulk"; break;
+ case USB_ENDPOINT_XFER_INT:
+ s = "-int"; break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ s = ""; break;
+ default:
+			s = "-iso"; break;
+ }; s; }),
+ qh->maxpacket);
+ if (count <= 0)
+ return 0;
+ buf += count;
+ max -= count;
+
+ list_for_each_entry(urb, &hep->urb_list, urb_list) {
+ tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n",
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length);
+ if (tmp <= 0)
+ break;
+ tmp = min(tmp, (int)max);
+ count += tmp;
+ buf += tmp;
+ max -= tmp;
+ }
+ return count;
+}
+
+static int
+dump_queue(struct list_head *q, char *buf, unsigned max)
+{
+ int count = 0;
+ struct musb_qh *qh;
+
+ list_for_each_entry(qh, q, ring) {
+ int tmp;
+
+ tmp = dump_qh(qh, buf, max);
+ if (tmp <= 0)
+ break;
+ tmp = min(tmp, (int)max);
+ count += tmp;
+ buf += tmp;
+ max -= tmp;
+ }
+ return count;
+}
+
+#endif /* HCD */
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max)
+{
+ char *buf = buffer;
+ int code = 0;
+ void __iomem *regs = ep->hw_ep->regs;
+ char *mode = "1buf";
+
+ if (ep->is_in) {
+ if (ep->hw_ep->tx_double_buffered)
+ mode = "2buf";
+ } else {
+ if (ep->hw_ep->rx_double_buffered)
+ mode = "2buf";
+ }
+
+ do {
+ struct usb_request *req;
+
+ code = snprintf(buf, max,
+ "\n%s (hw%d): %s%s, csr %04x maxp %04x\n",
+ ep->name, ep->current_epnum,
+ mode, ep->dma ? " dma" : "",
+ musb_readw(regs,
+ (ep->is_in || !ep->current_epnum)
+ ? MUSB_TXCSR
+ : MUSB_RXCSR),
+ musb_readw(regs, ep->is_in
+ ? MUSB_TXMAXP
+ : MUSB_RXMAXP)
+ );
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+
+ if (is_cppi_enabled() && ep->current_epnum) {
+ unsigned cppi = ep->current_epnum - 1;
+ void __iomem *base = ep->musb->ctrl_base;
+ unsigned off1 = cppi << 2;
+ void __iomem *ram = base;
+ char tmp[16];
+
+ if (ep->is_in) {
+ ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi);
+ tmp[0] = 0;
+ } else {
+ ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi);
+ snprintf(tmp, sizeof tmp, "%d left, ",
+ musb_readl(base,
+ DAVINCI_RXCPPI_BUFCNT0_REG + off1));
+ }
+
+ code = snprintf(buf, max, "%cX DMA%d: %s"
+ "%08x %08x, %08x %08x; "
+ "%08x %08x %08x .. %08x\n",
+ ep->is_in ? 'T' : 'R',
+ ep->current_epnum - 1, tmp,
+ musb_readl(ram, 0 * 4),
+ musb_readl(ram, 1 * 4),
+ musb_readl(ram, 2 * 4),
+ musb_readl(ram, 3 * 4),
+ musb_readl(ram, 4 * 4),
+ musb_readl(ram, 5 * 4),
+ musb_readl(ram, 6 * 4),
+ musb_readl(ram, 7 * 4));
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+
+ if (list_empty(&ep->req_list)) {
+ code = snprintf(buf, max, "\t(queue empty)\n");
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ break;
+ }
+ list_for_each_entry(req, &ep->req_list, list) {
+ code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n",
+ req,
+ req->zero ? "zero, " : "",
+ req->short_not_ok ? "!short, " : "",
+ req->actual, req->length);
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ } while (0);
+ return buf - buffer;
+}
+#endif
+
+static int
+dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max)
+{
+ int code = 0;
+ char *buf = aBuffer;
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ do {
+ musb_ep_select(musb->mregs, epnum);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (is_host_active(musb)) {
+ int dump_rx, dump_tx;
+ void __iomem *regs = hw_ep->regs;
+
+ /* TEMPORARY (!) until we have a real periodic
+ * schedule tree ...
+ */
+ if (!epnum) {
+ /* control is shared, uses RX queue
+ * but (mostly) shadowed tx registers
+ */
+ dump_tx = !list_empty(&musb->control);
+ dump_rx = 0;
+ } else if (hw_ep == musb->bulk_ep) {
+ dump_tx = !list_empty(&musb->out_bulk);
+ dump_rx = !list_empty(&musb->in_bulk);
+ } else if (musb->periodic[epnum]) {
+ struct usb_host_endpoint *hep;
+
+ hep = musb->periodic[epnum]->hep;
+ dump_rx = hep->desc.bEndpointAddress
+ & USB_ENDPOINT_DIR_MASK;
+ dump_tx = !dump_rx;
+ } else
+ break;
+ /* END TEMPORARY */
+
+
+ if (dump_rx) {
+ code = snprintf(buf, max,
+ "\nRX%d: %s rxcsr %04x interval %02x "
+ "max %04x type %02x; "
+ "dev %d hub %d port %d"
+ "\n",
+ epnum,
+ hw_ep->rx_double_buffered
+ ? "2buf" : "1buf",
+ musb_readw(regs, MUSB_RXCSR),
+ musb_readb(regs, MUSB_RXINTERVAL),
+ musb_readw(regs, MUSB_RXMAXP),
+ musb_readb(regs, MUSB_RXTYPE),
+ /* FIXME: assumes multipoint */
+ musb_readb(musb->mregs,
+ MUSB_BUSCTL_OFFSET(epnum,
+ MUSB_RXFUNCADDR)),
+ musb_readb(musb->mregs,
+ MUSB_BUSCTL_OFFSET(epnum,
+ MUSB_RXHUBADDR)),
+ musb_readb(musb->mregs,
+ MUSB_BUSCTL_OFFSET(epnum,
+ MUSB_RXHUBPORT))
+ );
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+
+ if (is_cppi_enabled()
+ && epnum
+ && hw_ep->rx_channel) {
+ unsigned cppi = epnum - 1;
+ unsigned off1 = cppi << 2;
+ void __iomem *base;
+ void __iomem *ram;
+ char tmp[16];
+
+ base = musb->ctrl_base;
+ ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
+ cppi) + base;
+ snprintf(tmp, sizeof tmp, "%d left, ",
+ musb_readl(base,
+ DAVINCI_RXCPPI_BUFCNT0_REG
+ + off1));
+
+ code = snprintf(buf, max,
+ " rx dma%d: %s"
+ "%08x %08x, %08x %08x; "
+ "%08x %08x %08x .. %08x\n",
+ cppi, tmp,
+ musb_readl(ram, 0 * 4),
+ musb_readl(ram, 1 * 4),
+ musb_readl(ram, 2 * 4),
+ musb_readl(ram, 3 * 4),
+ musb_readl(ram, 4 * 4),
+ musb_readl(ram, 5 * 4),
+ musb_readl(ram, 6 * 4),
+ musb_readl(ram, 7 * 4));
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+
+ if (hw_ep == musb->bulk_ep
+ && !list_empty(
+ &musb->in_bulk)) {
+ code = dump_queue(&musb->in_bulk,
+ buf, max);
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ } else if (musb->periodic[epnum]) {
+ code = dump_qh(musb->periodic[epnum],
+ buf, max);
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ }
+
+ if (dump_tx) {
+ code = snprintf(buf, max,
+ "\nTX%d: %s txcsr %04x interval %02x "
+ "max %04x type %02x; "
+ "dev %d hub %d port %d"
+ "\n",
+ epnum,
+ hw_ep->tx_double_buffered
+ ? "2buf" : "1buf",
+ musb_readw(regs, MUSB_TXCSR),
+ musb_readb(regs, MUSB_TXINTERVAL),
+ musb_readw(regs, MUSB_TXMAXP),
+ musb_readb(regs, MUSB_TXTYPE),
+ /* FIXME: assumes multipoint */
+ musb_readb(musb->mregs,
+ MUSB_BUSCTL_OFFSET(epnum,
+ MUSB_TXFUNCADDR)),
+ musb_readb(musb->mregs,
+ MUSB_BUSCTL_OFFSET(epnum,
+ MUSB_TXHUBADDR)),
+ musb_readb(musb->mregs,
+ MUSB_BUSCTL_OFFSET(epnum,
+ MUSB_TXHUBPORT))
+ );
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+
+ if (is_cppi_enabled()
+ && epnum
+ && hw_ep->tx_channel) {
+ unsigned cppi = epnum - 1;
+ void __iomem *base;
+ void __iomem *ram;
+
+ base = musb->ctrl_base;
+				ram = DAVINCI_TXCPPI_STATERAM_OFFSET(
+ cppi) + base;
+ code = snprintf(buf, max,
+ " tx dma%d: "
+ "%08x %08x, %08x %08x; "
+ "%08x %08x %08x .. %08x\n",
+ cppi,
+ musb_readl(ram, 0 * 4),
+ musb_readl(ram, 1 * 4),
+ musb_readl(ram, 2 * 4),
+ musb_readl(ram, 3 * 4),
+ musb_readl(ram, 4 * 4),
+ musb_readl(ram, 5 * 4),
+ musb_readl(ram, 6 * 4),
+ musb_readl(ram, 7 * 4));
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+
+ if (hw_ep == musb->control_ep
+ && !list_empty(
+ &musb->control)) {
+ code = dump_queue(&musb->control,
+ buf, max);
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ } else if (hw_ep == musb->bulk_ep
+ && !list_empty(
+ &musb->out_bulk)) {
+ code = dump_queue(&musb->out_bulk,
+ buf, max);
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ } else if (musb->periodic[epnum]) {
+ code = dump_qh(musb->periodic[epnum],
+ buf, max);
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ }
+ }
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_peripheral_active(musb)) {
+ code = 0;
+
+ if (hw_ep->ep_in.desc || !epnum) {
+ code = dump_ep(&hw_ep->ep_in, buf, max);
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ if (hw_ep->ep_out.desc) {
+ code = dump_ep(&hw_ep->ep_out, buf, max);
+ if (code <= 0)
+ break;
+ code = min(code, (int) max);
+ buf += code;
+ max -= code;
+ }
+ }
+#endif
+ } while (0);
+
+ return buf - aBuffer;
+}
+
+/* Dump the current status and compile options.
+ * @param musb the device driver instance
+ * @param buffer where to dump the status; it must be big enough to hold the
+ * result, otherwise "BAD THINGS HAPPEN(TM)".
+ */
+static int dump_header_stats(struct musb *musb, char *buffer)
+{
+ int code, count = 0;
+ const void __iomem *mbase = musb->mregs;
+
+ *buffer = 0;
+ count = sprintf(buffer, "Status: %sHDRC, Mode=%s "
+ "(Power=%02x, DevCtl=%02x)\n",
+ (musb->is_multipoint ? "M" : ""), MUSB_MODE(musb),
+ musb_readb(mbase, MUSB_POWER),
+ musb_readb(mbase, MUSB_DEVCTL));
+ if (count <= 0)
+ return 0;
+ buffer += count;
+
+ code = sprintf(buffer, "OTG state: %s; %sactive\n",
+ otg_state_string(musb),
+ musb->is_active ? "" : "in");
+ if (code <= 0)
+ goto done;
+ buffer += code;
+ count += code;
+
+ code = sprintf(buffer,
+ "Options: "
+#ifdef CONFIG_MUSB_PIO_ONLY
+ "pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ "cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+ "musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ "tusb-omap-dma"
+#else
+ "?dma?"
+#endif
+ ", "
+#ifdef CONFIG_USB_MUSB_OTG
+ "otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+ "peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+ "host"
+#endif
+ ", debug=%d [eps=%d]\n",
+ debug,
+ musb->nr_endpoints);
+ if (code <= 0)
+ goto done;
+ count += code;
+ buffer += code;
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ code = sprintf(buffer, "Peripheral address: %02x\n",
+ musb_readb(musb->ctrl_base, MUSB_FADDR));
+ if (code <= 0)
+ goto done;
+ buffer += code;
+ count += code;
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ code = sprintf(buffer, "Root port status: %08x\n",
+ musb->port1_status);
+ if (code <= 0)
+ goto done;
+ buffer += code;
+ count += code;
+#endif
+
+#ifdef CONFIG_ARCH_DAVINCI
+ code = sprintf(buffer,
+ "DaVinci: ctrl=%02x stat=%1x phy=%03x\n"
+ "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x"
+ "\n",
+ musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG),
+ musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG),
+ __raw_readl((void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR)),
+ musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG),
+ musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG),
+ musb_readl(musb->ctrl_base,
+ DAVINCI_USB_INT_SOURCE_REG),
+ musb_readl(musb->ctrl_base,
+ DAVINCI_USB_INT_MASK_REG));
+ if (code <= 0)
+ goto done;
+ count += code;
+ buffer += code;
+#endif /* DAVINCI */
+
+#ifdef CONFIG_USB_TUSB6010
+ code = sprintf(buffer,
+ "TUSB6010: devconf %08x, phy enable %08x drive %08x"
+ "\n\totg %03x timer %08x"
+ "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x"
+ "\n",
+ musb_readl(musb->ctrl_base, TUSB_DEV_CONF),
+ musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE),
+ musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL),
+ musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT),
+ musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER),
+ musb_readl(musb->ctrl_base, TUSB_PRCM_CONF),
+ musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT),
+ musb_readl(musb->ctrl_base, TUSB_INT_SRC),
+ musb_readl(musb->ctrl_base, TUSB_INT_MASK));
+ if (code <= 0)
+ goto done;
+ count += code;
+ buffer += code;
+#endif	/* TUSB6010 */
+
+ if (is_cppi_enabled() && musb->dma_controller) {
+ code = sprintf(buffer,
+ "CPPI: txcr=%d txsrc=%01x txena=%01x; "
+ "rxcr=%d rxsrc=%01x rxena=%01x "
+ "\n",
+ musb_readl(musb->ctrl_base,
+ DAVINCI_TXCPPI_CTRL_REG),
+ musb_readl(musb->ctrl_base,
+ DAVINCI_TXCPPI_RAW_REG),
+ musb_readl(musb->ctrl_base,
+ DAVINCI_TXCPPI_INTENAB_REG),
+ musb_readl(musb->ctrl_base,
+ DAVINCI_RXCPPI_CTRL_REG),
+ musb_readl(musb->ctrl_base,
+ DAVINCI_RXCPPI_RAW_REG),
+ musb_readl(musb->ctrl_base,
+ DAVINCI_RXCPPI_INTENAB_REG));
+ if (code <= 0)
+ goto done;
+ count += code;
+ buffer += code;
+ }
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_peripheral_enabled(musb)) {
+ code = sprintf(buffer, "Gadget driver: %s\n",
+ musb->gadget_driver
+ ? musb->gadget_driver->driver.name
+ : "(none)");
+ if (code <= 0)
+ goto done;
+ count += code;
+ buffer += code;
+ }
+#endif
+
+done:
+ return count;
+}
+
+/* Write to ProcFS
+ *
+ * C soft-connect
+ * c soft-disconnect
+ * I enable HS
+ * i disable HS
+ * s stop session
+ * F force session (OTG-unfriendly)
+ * E rElinquish bus (OTG)
+ * H request host mode
+ * h cancel host request
+ * T start sending TEST_PACKET
+ * D<num> set/query the debug level
+ */
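+/* For example, with the file registered as /proc/<name> by
+ * musb_debug_create() below:
+ *
+ *	echo F  > /proc/<name>		force a session start
+ *	echo D3 > /proc/<name>		set the debug level to 3
+ */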
+static int musb_proc_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char cmd;
+ u8 reg;
+ struct musb *musb = (struct musb *)data;
+ void __iomem *mbase = musb->mregs;
+
+ /* MOD_INC_USE_COUNT; */
+
+ if (unlikely(copy_from_user(&cmd, buffer, 1)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case 'C':
+ if (mbase) {
+ reg = musb_readb(mbase, MUSB_POWER)
+ | MUSB_POWER_SOFTCONN;
+ musb_writeb(mbase, MUSB_POWER, reg);
+ }
+ break;
+
+ case 'c':
+ if (mbase) {
+ reg = musb_readb(mbase, MUSB_POWER)
+ & ~MUSB_POWER_SOFTCONN;
+ musb_writeb(mbase, MUSB_POWER, reg);
+ }
+ break;
+
+ case 'I':
+ if (mbase) {
+ reg = musb_readb(mbase, MUSB_POWER)
+ | MUSB_POWER_HSENAB;
+ musb_writeb(mbase, MUSB_POWER, reg);
+ }
+ break;
+
+ case 'i':
+ if (mbase) {
+ reg = musb_readb(mbase, MUSB_POWER)
+ & ~MUSB_POWER_HSENAB;
+ musb_writeb(mbase, MUSB_POWER, reg);
+ }
+ break;
+
+ case 'F':
+ reg = musb_readb(mbase, MUSB_DEVCTL);
+ reg |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mbase, MUSB_DEVCTL, reg);
+ break;
+
+ case 'H':
+ if (mbase) {
+ reg = musb_readb(mbase, MUSB_DEVCTL);
+ reg |= MUSB_DEVCTL_HR;
+ musb_writeb(mbase, MUSB_DEVCTL, reg);
+ /* MUSB_HST_MODE( ((struct musb*)data) ); */
+ /* WARNING("Host Mode\n"); */
+ }
+ break;
+
+ case 'h':
+ if (mbase) {
+ reg = musb_readb(mbase, MUSB_DEVCTL);
+ reg &= ~MUSB_DEVCTL_HR;
+ musb_writeb(mbase, MUSB_DEVCTL, reg);
+ }
+ break;
+
+ case 'T':
+ if (mbase) {
+ musb_load_testpacket(musb);
+ musb_writeb(mbase, MUSB_TESTMODE,
+ MUSB_TEST_PACKET);
+ }
+ break;
+
+#if (MUSB_DEBUG > 0)
+ /* set/read debug level */
+ case 'D':{
+ if (count > 1) {
+ char digits[8], *p = digits;
+ int i = 0, level = 0, sign = 1;
+ int len = min(count - 1, (unsigned long)8);
+
+ if (copy_from_user(&digits, &buffer[1], len))
+ return -EFAULT;
+
+ /* optional sign */
+ if (*p == '-') {
+ len -= 1;
+ sign = -sign;
+ p++;
+ }
+
+ /* read it */
+			while (i++ < len && *p >= '0' && *p <= '9') {
+ level = level * 10 + (*p - '0');
+ p++;
+ }
+
+ level *= sign;
+ DBG(1, "debug level %d\n", level);
+ debug = level;
+ }
+ }
+ break;
+
+
+ case '?':
+ INFO("?: you are seeing it\n");
+ INFO("C/c: soft connect enable/disable\n");
+ INFO("I/i: hispeed enable/disable\n");
+ INFO("F: force session start\n");
+ INFO("H: host mode\n");
+ INFO("T: start sending TEST_PACKET\n");
+		INFO("D: set/read debug level\n");
+ break;
+#endif
+
+ default:
+ ERR("Command %c not implemented\n", cmd);
+ break;
+ }
+
+ musb_platform_try_idle(musb, 0);
+
+ return count;
+}
+
+static int musb_proc_read(char *page, char **start,
+ off_t off, int count, int *eof, void *data)
+{
+ char *buffer = page;
+ int code = 0;
+ unsigned long flags;
+ struct musb *musb = data;
+ unsigned epnum;
+
+ count -= off;
+ count -= 1; /* for NUL at end */
+ if (count <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ code = dump_header_stats(musb, buffer);
+ if (code > 0) {
+ buffer += code;
+ count -= code;
+ }
+
+ /* generate the report for the end points */
+ /* REVISIT ... not unless something's connected! */
+ for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints;
+ epnum++) {
+ code = dump_end_info(musb, epnum, buffer, count);
+ if (code > 0) {
+ buffer += code;
+ count -= code;
+ }
+ }
+
+ musb_platform_try_idle(musb, 0);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ *eof = 1;
+
+ return buffer - page;
+}
+
+void __devexit musb_debug_delete(char *name, struct musb *musb)
+{
+ if (musb->proc_entry)
+ remove_proc_entry(name, NULL);
+}
+
+struct proc_dir_entry *__init
+musb_debug_create(char *name, struct musb *data)
+{
+ struct proc_dir_entry *pde;
+
+ /* FIXME convert everything to seq_file; then later, debugfs */
+
+ if (!name)
+ return NULL;
+
+ pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL);
+ data->proc_entry = pde;
+ if (pde) {
+ pde->data = data;
+ /* pde->owner = THIS_MODULE; */
+
+ pde->read_proc = musb_proc_read;
+ pde->write_proc = musb_proc_write;
+
+ pde->size = 0;
+
+ pr_debug("Registered /proc/%s\n", name);
+ } else {
+		pr_debug("Cannot create a valid proc file entry\n");
+ }
+
+ return pde;
+}
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
new file mode 100644
index 000000000000..9c228661aa5a
--- /dev/null
+++ b/drivers/usb/musb/musb_regs.h
@@ -0,0 +1,300 @@
+/*
+ * MUSB OTG driver register defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_REGS_H__
+#define __MUSB_REGS_H__
+
+#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */
+
+/*
+ * Common USB registers
+ */
+
+#define MUSB_FADDR 0x00 /* 8-bit */
+#define MUSB_POWER 0x01 /* 8-bit */
+
+#define MUSB_INTRTX 0x02 /* 16-bit */
+#define MUSB_INTRRX 0x04
+#define MUSB_INTRTXE 0x06
+#define MUSB_INTRRXE 0x08
+#define MUSB_INTRUSB 0x0A /* 8 bit */
+#define MUSB_INTRUSBE 0x0B /* 8 bit */
+#define MUSB_FRAME 0x0C
+#define MUSB_INDEX 0x0E /* 8 bit */
+#define MUSB_TESTMODE 0x0F /* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#ifdef CONFIG_USB_TUSB6010
+#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
+#else
+#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
+#endif
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL 0x60 /* 8 bit */
+
+/* These are always controlled through the INDEX register: write the
+ * endpoint number to MUSB_INDEX first, then access them at these offsets. */
+#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
+#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
+#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
+#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
+
+/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
+#define MUSB_HWVERS 0x6C /* 8 bit */
+
+#define MUSB_EPINFO 0x78 /* 8 bit */
+#define MUSB_RAMINFO 0x79 /* 8 bit */
+#define MUSB_LINKINFO 0x7a /* 8 bit */
+#define MUSB_VPLEN 0x7b /* 8 bit */
+#define MUSB_HS_EOF1 0x7c /* 8 bit */
+#define MUSB_FS_EOF1 0x7d /* 8 bit */
+#define MUSB_LS_EOF1 0x7e /* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP 0x00
+#define MUSB_TXCSR 0x02
+#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
+#define MUSB_RXMAXP 0x04
+#define MUSB_RXCSR 0x06
+#define MUSB_RXCOUNT 0x08
+#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
+#define MUSB_TXTYPE 0x0A
+#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
+#define MUSB_TXINTERVAL 0x0B
+#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
+#define MUSB_RXTYPE 0x0C
+#define MUSB_RXINTERVAL 0x0D
+#define MUSB_FIFOSIZE 0x0F
+#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
+ (0x10 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset) \
+ (0x100 + (0x10*(_epnum)) + (_offset))
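+/* For example, in the flat model endpoint 3's TXCSR sits at
+ * 0x100 + 0x10 * 3 + MUSB_TXCSR = 0x132.
+ */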
+
+#ifdef CONFIG_USB_TUSB6010
+/* TUSB6010 EP0 configuration register is special */
+#define MUSB_TUSB_OFFSET(_epnum, _offset) \
+ (0x10 + _offset)
+#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
+#endif
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSB_TXFUNCADDR 0x00
+#define MUSB_TXHUBADDR 0x02
+#define MUSB_TXHUBPORT 0x03
+
+#define MUSB_RXFUNCADDR 0x04
+#define MUSB_RXHUBADDR 0x06
+#define MUSB_RXHUBPORT 0x07
+
+#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
+ (0x80 + (8*(_epnum)) + (_offset))
+
+/*
+ * MUSB Register bits
+ */
+
+/* POWER */
+#define MUSB_POWER_ISOUPDATE 0x80
+#define MUSB_POWER_SOFTCONN 0x40
+#define MUSB_POWER_HSENAB 0x20
+#define MUSB_POWER_HSMODE 0x10
+#define MUSB_POWER_RESET 0x08
+#define MUSB_POWER_RESUME 0x04
+#define MUSB_POWER_SUSPENDM 0x02
+#define MUSB_POWER_ENSUSPEND 0x01
+
+/* INTRUSB */
+#define MUSB_INTR_SUSPEND 0x01
+#define MUSB_INTR_RESUME 0x02
+#define MUSB_INTR_RESET		0x04	/* Peripheral mode */
+#define MUSB_INTR_BABBLE	0x04	/* Host mode; same bit as RESET */
+#define MUSB_INTR_SOF 0x08
+#define MUSB_INTR_CONNECT 0x10
+#define MUSB_INTR_DISCONNECT 0x20
+#define MUSB_INTR_SESSREQ 0x40
+#define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */
+
+/* DEVCTL */
+#define MUSB_DEVCTL_BDEVICE 0x80
+#define MUSB_DEVCTL_FSDEV 0x40
+#define MUSB_DEVCTL_LSDEV 0x20
+#define MUSB_DEVCTL_VBUS 0x18
+#define MUSB_DEVCTL_VBUS_SHIFT 3
+#define MUSB_DEVCTL_HM 0x04
+#define MUSB_DEVCTL_HR 0x02
+#define MUSB_DEVCTL_SESSION 0x01
+
+/* TESTMODE */
+#define MUSB_TEST_FORCE_HOST 0x80
+#define MUSB_TEST_FIFO_ACCESS 0x40
+#define MUSB_TEST_FORCE_FS 0x20
+#define MUSB_TEST_FORCE_HS 0x10
+#define MUSB_TEST_PACKET 0x08
+#define MUSB_TEST_K 0x04
+#define MUSB_TEST_J 0x02
+#define MUSB_TEST_SE0_NAK 0x01
+
+/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
+#define MUSB_FIFOSZ_DPB 0x10
+/* Allocation size (8, 16, 32, ... 4096) */
+#define MUSB_FIFOSZ_SIZE 0x0f
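+/* The SIZE field holds log2(FIFO bytes) - 3, so writing 0 selects an
+ * 8 byte FIFO and 9 selects 4096 bytes; setting DPB on top of that
+ * reserves twice the space, for double-packet buffering.
+ */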
+
+/* CSR0 */
+#define MUSB_CSR0_FLUSHFIFO 0x0100
+#define MUSB_CSR0_TXPKTRDY 0x0002
+#define MUSB_CSR0_RXPKTRDY 0x0001
+
+/* CSR0 in Peripheral mode */
+#define MUSB_CSR0_P_SVDSETUPEND 0x0080
+#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040
+#define MUSB_CSR0_P_SENDSTALL 0x0020
+#define MUSB_CSR0_P_SETUPEND 0x0010
+#define MUSB_CSR0_P_DATAEND 0x0008
+#define MUSB_CSR0_P_SENTSTALL 0x0004
+
+/* CSR0 in Host mode */
+#define MUSB_CSR0_H_DIS_PING 0x0800
+#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */
+#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */
+#define MUSB_CSR0_H_NAKTIMEOUT 0x0080
+#define MUSB_CSR0_H_STATUSPKT 0x0040
+#define MUSB_CSR0_H_REQPKT 0x0020
+#define MUSB_CSR0_H_ERROR 0x0010
+#define MUSB_CSR0_H_SETUPPKT 0x0008
+#define MUSB_CSR0_H_RXSTALL 0x0004
+
+/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_CSR0_P_WZC_BITS \
+ (MUSB_CSR0_P_SENTSTALL)
+#define MUSB_CSR0_H_WZC_BITS \
+ (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \
+ | MUSB_CSR0_RXPKTRDY)
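+/* To leave these status bits untouched during a read-modify-write of a
+ * CSR, OR the matching _WZC_BITS mask into the value written back:
+ * writing them as 1 is ignored, while writing 0 would clear them.
+ */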
+
+/* TxType/RxType */
+#define MUSB_TYPE_SPEED 0xc0
+#define MUSB_TYPE_SPEED_SHIFT 6
+#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */
+#define MUSB_TYPE_PROTO_SHIFT 4
+#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */
+
+/* CONFIGDATA */
+#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */
+#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */
+#define MUSB_CONFIGDATA_BIGENDIAN 0x20
+#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */
+#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */
+#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */
+#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */
+#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */
+
+/* TXCSR in Peripheral and Host mode */
+#define MUSB_TXCSR_AUTOSET 0x8000
+#define MUSB_TXCSR_MODE 0x2000
+#define MUSB_TXCSR_DMAENAB 0x1000
+#define MUSB_TXCSR_FRCDATATOG 0x0800
+#define MUSB_TXCSR_DMAMODE 0x0400
+#define MUSB_TXCSR_CLRDATATOG 0x0040
+#define MUSB_TXCSR_FLUSHFIFO 0x0008
+#define MUSB_TXCSR_FIFONOTEMPTY 0x0002
+#define MUSB_TXCSR_TXPKTRDY 0x0001
+
+/* TXCSR in Peripheral mode */
+#define MUSB_TXCSR_P_ISO 0x4000
+#define MUSB_TXCSR_P_INCOMPTX 0x0080
+#define MUSB_TXCSR_P_SENTSTALL 0x0020
+#define MUSB_TXCSR_P_SENDSTALL 0x0010
+#define MUSB_TXCSR_P_UNDERRUN 0x0004
+
+/* TXCSR in Host mode */
+#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200
+#define MUSB_TXCSR_H_DATATOGGLE 0x0100
+#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080
+#define MUSB_TXCSR_H_RXSTALL 0x0020
+#define MUSB_TXCSR_H_ERROR 0x0004
+
+/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_TXCSR_P_WZC_BITS \
+ (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \
+ | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY)
+#define MUSB_TXCSR_H_WZC_BITS \
+ (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \
+ | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY)
+
+/* RXCSR in Peripheral and Host mode */
+#define MUSB_RXCSR_AUTOCLEAR 0x8000
+#define MUSB_RXCSR_DMAENAB 0x2000
+#define MUSB_RXCSR_DISNYET 0x1000
+#define MUSB_RXCSR_PID_ERR 0x1000
+#define MUSB_RXCSR_DMAMODE 0x0800
+#define MUSB_RXCSR_INCOMPRX 0x0100
+#define MUSB_RXCSR_CLRDATATOG 0x0080
+#define MUSB_RXCSR_FLUSHFIFO 0x0010
+#define MUSB_RXCSR_DATAERROR 0x0008
+#define MUSB_RXCSR_FIFOFULL 0x0002
+#define MUSB_RXCSR_RXPKTRDY 0x0001
+
+/* RXCSR in Peripheral mode */
+#define MUSB_RXCSR_P_ISO 0x4000
+#define MUSB_RXCSR_P_SENTSTALL 0x0040
+#define MUSB_RXCSR_P_SENDSTALL 0x0020
+#define MUSB_RXCSR_P_OVERRUN 0x0004
+
+/* RXCSR in Host mode */
+#define MUSB_RXCSR_H_AUTOREQ 0x4000
+#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400
+#define MUSB_RXCSR_H_DATATOGGLE 0x0200
+#define MUSB_RXCSR_H_RXSTALL 0x0040
+#define MUSB_RXCSR_H_REQPKT 0x0020
+#define MUSB_RXCSR_H_ERROR 0x0004
+
+/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_RXCSR_P_WZC_BITS \
+ (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \
+ | MUSB_RXCSR_RXPKTRDY)
+#define MUSB_RXCSR_H_WZC_BITS \
+ (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \
+ | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY)
+
+/* HUBADDR */
+#define MUSB_HUBADDR_MULTI_TT 0x80
+
+#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
new file mode 100644
index 000000000000..e0e9ce584175
--- /dev/null
+++ b/drivers/usb/musb/musb_virthub.c
@@ -0,0 +1,425 @@
+/*
+ * MUSB OTG driver virtual root hub support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include <asm/unaligned.h>
+
+#include "musb_core.h"
+
+
+static void musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+ if (!is_host_active(musb))
+ return;
+
+ /* NOTE: this doesn't necessarily put PHY into low power mode,
+ * turning off its clock; that's a function of PHY integration and
+ * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect
+ * SE0 changing to connect (J) or wakeup (K) states.
+ */
+ power = musb_readb(mbase, MUSB_POWER);
+ if (do_suspend) {
+ int retries = 10000;
+
+ power &= ~MUSB_POWER_RESUME;
+ power |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ /* Needed for OPT A tests */
+ power = musb_readb(mbase, MUSB_POWER);
+ while (power & MUSB_POWER_SUSPENDM) {
+ power = musb_readb(mbase, MUSB_POWER);
+ if (retries-- < 1)
+ break;
+ }
+
+ DBG(3, "Root port suspended, power %02x\n", power);
+
+ musb->port1_status |= USB_PORT_STAT_SUSPEND;
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ musb->xceiv.state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ musb_platform_try_idle(musb, 0);
+ break;
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_B_HOST:
+ musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ musb_platform_try_idle(musb, 0);
+ break;
+#endif
+ default:
+ DBG(1, "bogus rh suspend? %s\n",
+ otg_state_string(musb));
+ }
+ } else if (power & MUSB_POWER_SUSPENDM) {
+ power &= ~MUSB_POWER_SUSPENDM;
+ power |= MUSB_POWER_RESUME;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ DBG(3, "Root port resuming, power %02x\n", power);
+
+ /* later, GetPortStatus will stop RESUME signaling */
+ musb->port1_status |= MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies + msecs_to_jiffies(20);
+ }
+}
+
+static void musb_port_reset(struct musb *musb, bool do_reset)
+{
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ if (musb->xceiv.state == OTG_STATE_B_IDLE) {
+ DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ return;
+ }
+#endif
+
+ if (!is_host_active(musb))
+ return;
+
+ /* NOTE: caller guarantees it will turn off the reset when
+ * the appropriate amount of time has passed
+ */
+ power = musb_readb(mbase, MUSB_POWER);
+ if (do_reset) {
+
+ /*
+ * If RESUME is set, we must make sure it stays minimum 20 ms.
+ * Then we must clear RESUME and wait a bit to let musb start
+ * generating SOFs. If we don't do this, OPT HS A 6.8 tests
+ * fail with "Error! Did not receive an SOF before suspend
+ * detected".
+ */
+ if (power & MUSB_POWER_RESUME) {
+ while (time_before(jiffies, musb->rh_timer))
+ msleep(1);
+ musb_writeb(mbase, MUSB_POWER,
+ power & ~MUSB_POWER_RESUME);
+ msleep(1);
+ }
+
+ musb->ignore_disconnect = true;
+ power &= 0xf0;
+ musb_writeb(mbase, MUSB_POWER,
+ power | MUSB_POWER_RESET);
+
+ musb->port1_status |= USB_PORT_STAT_RESET;
+ musb->port1_status &= ~USB_PORT_STAT_ENABLE;
+ musb->rh_timer = jiffies + msecs_to_jiffies(50);
+ } else {
+ DBG(4, "root port reset stopped\n");
+ musb_writeb(mbase, MUSB_POWER,
+ power & ~MUSB_POWER_RESET);
+
+ musb->ignore_disconnect = false;
+
+ power = musb_readb(mbase, MUSB_POWER);
+ if (power & MUSB_POWER_HSMODE) {
+ DBG(4, "high-speed device connected\n");
+ musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
+ }
+
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ musb->port1_status |= USB_PORT_STAT_ENABLE
+ | (USB_PORT_STAT_C_RESET << 16)
+ | (USB_PORT_STAT_C_ENABLE << 16);
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+ }
+}
+
+void musb_root_disconnect(struct musb *musb)
+{
+ musb->port1_status = (1 << USB_PORT_FEAT_POWER)
+ | (1 << USB_PORT_FEAT_C_CONNECTION);
+
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ musb->is_active = 0;
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ default:
+ DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
+ }
+}
+
+
+/*---------------------------------------------------------------------*/
+
+/* Caller may or may not hold musb->lock */
+int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ int retval = 0;
+
+ /* called in_irq() via usb_hcd_poll_rh_status() */
+ if (musb->port1_status & 0xffff0000) {
+ *buf = 0x02;
+ retval = 1;
+ }
+ return retval;
+}
+
+int musb_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ u32 temp;
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ /* hub features: always zero, setting is a NOP
+ * port features: reported, sometimes updated when host is active
+ * no indicators
+ */
+ switch (typeReq) {
+ case ClearHubFeature:
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if ((wIndex & 0xff) != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, false);
+ break;
+ case USB_PORT_FEAT_POWER:
+ if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+ musb_set_vbus(musb, 0);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_SUSPEND:
+ break;
+ default:
+ goto error;
+ }
+ DBG(5, "clear feature %d\n", wValue);
+ musb->port1_status &= ~(1 << wValue);
+ break;
+ case GetHubDescriptor:
+ {
+ struct usb_hub_descriptor *desc = (void *)buf;
+
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = __constant_cpu_to_le16(
+ 0x0001 /* per-port power switching */
+ | 0x0010 /* no overcurrent reporting */
+ );
+ desc->bPwrOn2PwrGood = 5; /* msec/2 */
+ desc->bHubContrCurrent = 0;
+
+ /* workaround bogus struct definition */
+ desc->DeviceRemovable[0] = 0x02; /* port 1 */
+ desc->DeviceRemovable[1] = 0xff;
+ }
+ break;
+ case GetHubStatus:
+ temp = 0;
+ *(__le32 *) buf = cpu_to_le32(temp);
+ break;
+ case GetPortStatus:
+ if (wIndex != 1)
+ goto error;
+
+ /* finish RESET signaling? */
+ if ((musb->port1_status & USB_PORT_STAT_RESET)
+ && time_after_eq(jiffies, musb->rh_timer))
+ musb_port_reset(musb, false);
+
+ /* finish RESUME signaling? */
+ if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
+ && time_after_eq(jiffies, musb->rh_timer)) {
+ u8 power;
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ DBG(4, "root port resume stopped, power %02x\n",
+ power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+
+ /* ISSUE: DaVinci (RTL 1.300) disconnects after
+ * resume of high speed peripherals (but not full
+ * speed ones).
+ */
+
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+ | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ }
+
+ put_unaligned(cpu_to_le32(musb->port1_status
+ & ~MUSB_PORT_STAT_RESUME),
+ (__le32 *) buf);
+
+ /* port change status is more interesting */
+ DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
+ musb->port1_status);
+ break;
+ case SetPortFeature:
+ if ((wIndex & 0xff) != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ /* NOTE: this controller has a strange state machine
+ * that involves "requesting sessions" according to
+ * magic side effects from incompletely-described
+ * rules about startup...
+ *
+ * This call is what really starts the host mode; be
+ * very careful about side effects if you reorder any
+ * initialization logic, e.g. for OTG, or change any
+ * logic relating to VBUS power-up.
+ */
+ if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+ musb_start(musb);
+ break;
+ case USB_PORT_FEAT_RESET:
+ musb_port_reset(musb, true);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, true);
+ break;
+ case USB_PORT_FEAT_TEST:
+ if (unlikely(is_host_active(musb)))
+ goto error;
+
+ wIndex >>= 8;
+ switch (wIndex) {
+ case 1:
+ pr_debug("TEST_J\n");
+ temp = MUSB_TEST_J;
+ break;
+ case 2:
+ pr_debug("TEST_K\n");
+ temp = MUSB_TEST_K;
+ break;
+ case 3:
+ pr_debug("TEST_SE0_NAK\n");
+ temp = MUSB_TEST_SE0_NAK;
+ break;
+ case 4:
+ pr_debug("TEST_PACKET\n");
+ temp = MUSB_TEST_PACKET;
+ musb_load_testpacket(musb);
+ break;
+ case 5:
+ pr_debug("TEST_FORCE_ENABLE\n");
+ temp = MUSB_TEST_FORCE_HOST
+ | MUSB_TEST_FORCE_HS;
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL,
+ MUSB_DEVCTL_SESSION);
+ break;
+ case 6:
+ pr_debug("TEST_FIFO_ACCESS\n");
+ temp = MUSB_TEST_FIFO_ACCESS;
+ break;
+ default:
+ goto error;
+ }
+ musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
+ break;
+ default:
+ goto error;
+ }
+ DBG(5, "set feature %d\n", wValue);
+ musb->port1_status |= 1 << wValue;
+ break;
+
+ default:
+error:
+ /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return retval;
+}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
new file mode 100644
index 000000000000..9ba8fb7fcd24
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.c
@@ -0,0 +1,433 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include "musb_core.h"
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include "omap2430.h"
+#endif
+
+#define MUSB_HSDMA_BASE 0x200
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x4
+#define MUSB_HSDMA_ADDRESS 0x8
+#define MUSB_HSDMA_COUNT 0xc
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \
+ (MUSB_HSDMA_BASE + (_bChannel << 4) + _offset)
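+/* Each channel owns a 16-byte register window above MUSB_HSDMA_BASE,
+ * e.g. channel 2's CONTROL register is at 0x200 + (2 << 4) + 0x4 = 0x224.
+ */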
+
+/* control register (16-bit): */
+#define MUSB_HSDMA_ENABLE_SHIFT 0
+#define MUSB_HSDMA_TRANSMIT_SHIFT 1
+#define MUSB_HSDMA_MODE1_SHIFT 2
+#define MUSB_HSDMA_IRQENABLE_SHIFT 3
+#define MUSB_HSDMA_ENDPOINT_SHIFT 4
+#define MUSB_HSDMA_BUSERROR_SHIFT 8
+#define MUSB_HSDMA_BURSTMODE_SHIFT 9
+#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
+#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
+#define MUSB_HSDMA_BURSTMODE_INCR4 1
+#define MUSB_HSDMA_BURSTMODE_INCR8 2
+#define MUSB_HSDMA_BURSTMODE_INCR16 3
+
+#define MUSB_HSDMA_CHANNELS 8
+
+struct musb_dma_controller;
+
+struct musb_dma_channel {
+ struct dma_channel Channel;
+ struct musb_dma_controller *controller;
+ u32 dwStartAddress;
+ u32 len;
+ u16 wMaxPacketSize;
+ u8 bIndex;
+ u8 epnum;
+ u8 transmit;
+};
+
+struct musb_dma_controller {
+ struct dma_controller Controller;
+ struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS];
+ void *pDmaPrivate;
+ void __iomem *pCoreBase;
+ u8 bChannelCount;
+ u8 bmUsedChannels;
+ u8 irq;
+};
+
+static int dma_controller_start(struct dma_controller *c)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel);
+
+static int dma_controller_stop(struct dma_controller *c)
+{
+ struct musb_dma_controller *controller =
+ container_of(c, struct musb_dma_controller, Controller);
+ struct musb *musb = (struct musb *) controller->pDmaPrivate;
+ struct dma_channel *pChannel;
+ u8 bBit;
+
+ if (controller->bmUsedChannels != 0) {
+ dev_err(musb->controller,
+ "Stopping DMA controller while channel active\n");
+
+ for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+ if (controller->bmUsedChannels & (1 << bBit)) {
+ pChannel = &controller->aChannel[bBit].Channel;
+ dma_channel_release(pChannel);
+
+ if (!controller->bmUsedChannels)
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 transmit)
+{
+ u8 bBit;
+ struct dma_channel *pChannel = NULL;
+ struct musb_dma_channel *pImplChannel = NULL;
+ struct musb_dma_controller *controller =
+ container_of(c, struct musb_dma_controller, Controller);
+
+ for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+ if (!(controller->bmUsedChannels & (1 << bBit))) {
+ controller->bmUsedChannels |= (1 << bBit);
+ pImplChannel = &(controller->aChannel[bBit]);
+ pImplChannel->controller = controller;
+ pImplChannel->bIndex = bBit;
+ pImplChannel->epnum = hw_ep->epnum;
+ pImplChannel->transmit = transmit;
+ pChannel = &(pImplChannel->Channel);
+ pChannel->private_data = pImplChannel;
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+ pChannel->max_len = 0x10000;
+ /* Tx => mode 1; Rx => mode 0 */
+ pChannel->desired_mode = transmit;
+ pChannel->actual_len = 0;
+ break;
+ }
+ }
+ return pChannel;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+
+ pChannel->actual_len = 0;
+ pImplChannel->dwStartAddress = 0;
+ pImplChannel->len = 0;
+
+ pImplChannel->controller->bmUsedChannels &=
+ ~(1 << pImplChannel->bIndex);
+
+ pChannel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+static void configure_channel(struct dma_channel *pChannel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+ struct musb_dma_controller *controller = pImplChannel->controller;
+ void __iomem *mbase = controller->pCoreBase;
+ u8 bChannel = pImplChannel->bIndex;
+ u16 csr = 0;
+
+ DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
+ pChannel, packet_sz, dma_addr, len, mode);
+
+ if (mode) {
+ csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
+ BUG_ON(len < packet_sz);
+
+ if (packet_sz >= 64) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR16
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ } else if (packet_sz >= 32) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR8
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ } else if (packet_sz >= 16) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR4
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ }
+ }
+
+ csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
+ | (1 << MUSB_HSDMA_ENABLE_SHIFT)
+ | (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
+ | (pImplChannel->transmit
+ ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
+ : 0);
+
+ /* address/count */
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+ dma_addr);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+ len);
+
+ /* control (this should start things) */
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+ csr);
+}
+
+static int dma_channel_program(struct dma_channel *pChannel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+
+ DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+ pImplChannel->epnum,
+ pImplChannel->transmit ? "Tx" : "Rx",
+ packet_sz, dma_addr, len, mode);
+
+ BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ pChannel->status == MUSB_DMA_STATUS_BUSY);
+
+ pChannel->actual_len = 0;
+ pImplChannel->dwStartAddress = dma_addr;
+ pImplChannel->len = len;
+ pImplChannel->wMaxPacketSize = packet_sz;
+ pChannel->status = MUSB_DMA_STATUS_BUSY;
+
+ if ((mode == 1) && (len >= packet_sz))
+ configure_channel(pChannel, packet_sz, 1, dma_addr, len);
+ else
+ configure_channel(pChannel, packet_sz, 0, dma_addr, len);
+
+ return true;
+}
+
+static int dma_channel_abort(struct dma_channel *pChannel)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+ u8 bChannel = pImplChannel->bIndex;
+ void __iomem *mbase = pImplChannel->controller->pCoreBase;
+ u16 csr;
+
+ if (pChannel->status == MUSB_DMA_STATUS_BUSY) {
+ if (pImplChannel->transmit) {
+
+ csr = musb_readw(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_TXCSR));
+ csr &= ~(MUSB_TXCSR_AUTOSET |
+ MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_DMAMODE);
+ musb_writew(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_TXCSR),
+ csr);
+ } else {
+ csr = musb_readw(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_RXCSR));
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR |
+ MUSB_RXCSR_DMAENAB |
+ MUSB_RXCSR_DMAMODE);
+ musb_writew(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_RXCSR),
+ csr);
+ }
+
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+ 0);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+ 0);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+ 0);
+
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+ }
+ return 0;
+}
+
+static irqreturn_t dma_controller_irq(int irq, void *private_data)
+{
+ struct musb_dma_controller *controller =
+ (struct musb_dma_controller *)private_data;
+ struct musb_dma_channel *pImplChannel;
+ struct musb *musb = controller->pDmaPrivate;
+ void __iomem *mbase = controller->pCoreBase;
+ struct dma_channel *pChannel;
+ u8 bChannel;
+ u16 csr;
+ u32 dwAddress;
+ u8 int_hsdma;
+ irqreturn_t retval = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+ if (!int_hsdma)
+ goto done;
+
+ for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) {
+ if (int_hsdma & (1 << bChannel)) {
+ pImplChannel = (struct musb_dma_channel *)
+ &(controller->aChannel[bChannel]);
+ pChannel = &pImplChannel->Channel;
+
+ csr = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
+ MUSB_HSDMA_CONTROL));
+
+ if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
+ pImplChannel->Channel.status =
+ MUSB_DMA_STATUS_BUS_ABORT;
+ else {
+ u8 devctl;
+
+ dwAddress = musb_readl(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(
+ bChannel,
+ MUSB_HSDMA_ADDRESS));
+ pChannel->actual_len = dwAddress
+ - pImplChannel->dwStartAddress;
+
+ DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+ pChannel, pImplChannel->dwStartAddress,
+ dwAddress, pChannel->actual_len,
+ pImplChannel->len,
+ (pChannel->actual_len
+ < pImplChannel->len) ?
+ "=> reconfig 0" : "=> complete");
+
+ devctl = musb_readb(mbase, MUSB_DEVCTL);
+
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+
+ /* completed */
+ if ((devctl & MUSB_DEVCTL_HM)
+ && (pImplChannel->transmit)
+ && ((pChannel->desired_mode == 0)
+ || (pChannel->actual_len &
+ (pImplChannel->wMaxPacketSize - 1)))
+ ) {
+ /* Send out the packet */
+ musb_ep_select(mbase,
+ pImplChannel->epnum);
+ musb_writew(mbase, MUSB_EP_OFFSET(
+ pImplChannel->epnum,
+ MUSB_TXCSR),
+ MUSB_TXCSR_TXPKTRDY);
+ } else
+ musb_dma_completion(
+ musb,
+ pImplChannel->epnum,
+ pImplChannel->transmit);
+ }
+ }
+ }
+ retval = IRQ_HANDLED;
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return retval;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct musb_dma_controller *controller;
+
+ controller = container_of(c, struct musb_dma_controller, Controller);
+ if (!controller)
+ return;
+
+ if (controller->irq)
+ free_irq(controller->irq, c);
+
+ kfree(controller);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *pCoreBase)
+{
+ struct musb_dma_controller *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq(pdev, 1);
+
+	if (irq <= 0) {
+ dev_err(dev, "No DMA interrupt line!\n");
+ return NULL;
+ }
+
+ controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL);
+ if (!controller)
+ return NULL;
+
+ controller->bChannelCount = MUSB_HSDMA_CHANNELS;
+ controller->pDmaPrivate = musb;
+ controller->pCoreBase = pCoreBase;
+
+ controller->Controller.start = dma_controller_start;
+ controller->Controller.stop = dma_controller_stop;
+ controller->Controller.channel_alloc = dma_channel_allocate;
+ controller->Controller.channel_release = dma_channel_release;
+ controller->Controller.channel_program = dma_channel_program;
+ controller->Controller.channel_abort = dma_channel_abort;
+
+ if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
+ musb->controller->bus_id, &controller->Controller)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+ dma_controller_destroy(&controller->Controller);
+ return NULL;
+ }
+
+ controller->irq = irq;
+
+ return &controller->Controller;
+}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
new file mode 100644
index 000000000000..298b22e6ad0d
--- /dev/null
+++ b/drivers/usb/musb/omap2430.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2005-2007 by Texas Instruments
+ * Some code has been taken from tusb6010.c
+ * Copyrights for that are attributable to:
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+#include "omap2430.h"
+
+#ifdef CONFIG_ARCH_OMAP3430
+#define get_cpu_rev() 2
+#endif
+
+#define MUSB_TIMEOUT_A_WAIT_BCON 1100
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+ u8 power;
+ u8 devctl;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_SUSPEND:
+ /* finish RESUME signaling? */
+ if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ DBG(1, "root port resume stopped, power %02x\n", power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+ | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ }
+ break;
+#endif
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_HOST:
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ else
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+#endif
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+ unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = default_timeout;
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+ DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+ del_timer(&musb_idle_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&musb_idle_timer))
+ last_timer = timeout;
+ else {
+ DBG(4, "Longer idle timer already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+ DBG(4, "%s inactive, for idle timer for %lu ms\n",
+ otg_state_string(musb),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb_idle_timer, timeout);
+}
+
+void musb_platform_enable(struct musb *musb)
+{
+}
+void musb_platform_disable(struct musb *musb)
+{
+}
+static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+}
+
+static void omap_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ musb->is_active = 1;
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+
+ /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
+ * jumping right to B_IDLE...
+ */
+
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ MUSB_DEV_MODE(musb);
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ DBG(1, "VBUS %s, devctl %02x "
+ /* otg %3x conf %08x prcm %08x */ "\n",
+ otg_state_string(musb),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+static int omap_set_power(struct otg_transceiver *x, unsigned mA)
+{
+ return 0;
+}
+
+static int musb_platform_resume(struct musb *musb);
+
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ switch (musb_mode) {
+ case MUSB_HOST:
+ otg_set_host(&musb->xceiv, musb->xceiv.host);
+ break;
+ case MUSB_PERIPHERAL:
+ otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
+ break;
+ case MUSB_OTG:
+ break;
+ }
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ u32 l;
+
+#if defined(CONFIG_ARCH_OMAP2430)
+ omap_cfg_reg(AE5_2430_USB0HS_STP);
+#endif
+
+ musb_platform_resume(musb);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l &= ~ENABLEWAKEUP; /* disable wakeup */
+ l &= ~NOSTDBY; /* remove possible nostdby */
+ l |= SMARTSTDBY; /* enable smart standby */
+ l &= ~AUTOIDLE; /* disable auto idle */
+ l &= ~NOIDLE; /* remove possible noidle */
+ l |= SMARTIDLE; /* enable smart idle */
+ l |= AUTOIDLE; /* enable auto idle */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ l = omap_readl(OTG_INTERFSEL);
+ l |= ULPI_12PIN;
+ omap_writel(l, OTG_INTERFSEL);
+
+ pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+ "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
+ omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
+ omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
+ omap_readl(OTG_SIMENABLE));
+
+ omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
+
+ if (is_host_enabled(musb))
+ musb->board_set_vbus = omap_set_vbus;
+ if (is_peripheral_enabled(musb))
+ musb->xceiv.set_power = omap_set_power;
+ musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
+
+ setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+ return 0;
+}
+
+int musb_platform_suspend(struct musb *musb)
+{
+ u32 l;
+
+ if (!musb->clock)
+ return 0;
+
+ /* in any role */
+ l = omap_readl(OTG_FORCESTDBY);
+ l |= ENABLEFORCE; /* enable MSTANDBY */
+ omap_writel(l, OTG_FORCESTDBY);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l |= ENABLEWAKEUP; /* enable wakeup */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ if (musb->xceiv.set_suspend)
+ musb->xceiv.set_suspend(&musb->xceiv, 1);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ else
+ clk_disable(musb->clock);
+
+ return 0;
+}
+
+static int musb_platform_resume(struct musb *musb)
+{
+ u32 l;
+
+ if (!musb->clock)
+ return 0;
+
+ if (musb->xceiv.set_suspend)
+ musb->xceiv.set_suspend(&musb->xceiv, 0);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ else
+ clk_enable(musb->clock);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l &= ~ENABLEWAKEUP; /* disable wakeup */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ l = omap_readl(OTG_FORCESTDBY);
+ l &= ~ENABLEFORCE; /* disable MSTANDBY */
+ omap_writel(l, OTG_FORCESTDBY);
+
+ return 0;
+}
+
+
+int musb_platform_exit(struct musb *musb)
+{
+
+ omap_vbus_power(musb, 0 /*off*/, 1);
+
+ musb_platform_suspend(musb);
+
+ clk_put(musb->clock);
+ musb->clock = 0;
+
+ return 0;
+}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
new file mode 100644
index 000000000000..786a62071f72
--- /dev/null
+++ b/drivers/usb/musb/omap2430.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_OMAP243X_H__
+#define __MUSB_OMAP243X_H__
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include <asm/arch/hardware.h>
+#include <asm/arch/usb.h>
+
+/*
+ * OMAP2430-specific definitions
+ */
+
+#define MENTOR_BASE_OFFSET 0
+#if defined(CONFIG_ARCH_OMAP2430)
+#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE)
+#elif defined(CONFIG_ARCH_OMAP3430)
+#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
+#endif
+#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
+#define OTG_REVISION OMAP_HSOTG(0x0)
+#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
+# define MIDLEMODE 12 /* bit position */
+# define FORCESTDBY (0 << MIDLEMODE)
+# define NOSTDBY (1 << MIDLEMODE)
+# define SMARTSTDBY (2 << MIDLEMODE)
+# define SIDLEMODE 3 /* bit position */
+# define FORCEIDLE (0 << SIDLEMODE)
+# define NOIDLE (1 << SIDLEMODE)
+# define SMARTIDLE (2 << SIDLEMODE)
+# define ENABLEWAKEUP (1 << 2)
+# define SOFTRST (1 << 1)
+# define AUTOIDLE (1 << 0)
+#define OTG_SYSSTATUS OMAP_HSOTG(0x8)
+# define RESETDONE (1 << 0)
+#define OTG_INTERFSEL OMAP_HSOTG(0xc)
+# define EXTCP (1 << 2)
+# define PHYSEL 0 /* bit position */
+# define UTMI_8BIT (0 << PHYSEL)
+# define ULPI_12PIN (1 << PHYSEL)
+# define ULPI_8PIN (2 << PHYSEL)
+#define OTG_SIMENABLE OMAP_HSOTG(0x10)
+# define TM1 (1 << 0)
+#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
+# define ENABLEFORCE (1 << 0)
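+
+/*
+ * Illustrative sketch only (mirrors what musb_platform_init() in
+ * omap2430.c does; not a full read-modify-write of every field):
+ * MIDLEMODE and SIDLEMODE are two-bit fields, so the setup code
+ * clears the unwanted mode bits before selecting the smart modes:
+ *
+ *	u32 l = omap_readl(OTG_SYSCONFIG);
+ *	l &= ~(NOSTDBY | NOIDLE);
+ *	l |= SMARTSTDBY | SMARTIDLE;
+ *	omap_writel(l, OTG_SYSCONFIG);
+ */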
+
+#endif	/* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */
+
+#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
new file mode 100644
index 000000000000..b73b036f3d77
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.c
@@ -0,0 +1,1151 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Notes:
+ * - Driver assumes that interface to external host (main CPU) is
+ * configured for NOR FLASH interface instead of VLYNQ serial
+ * interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include "musb_core.h"
+
+static void tusb_source_power(struct musb *musb, int is_on);
+
+#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
+#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
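+
+/*
+ * Worked example (illustration only): a raw revision byte of 0x31
+ * decodes as TUSB_REV_MAJOR(0x31) == 3 and TUSB_REV_MINOR(0x31) == 1,
+ * i.e. silicon revision 3.1.
+ */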
+
+/*
+ * Checks the revision. We need to use the DMA register as 3.0 does not
+ * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
+ */
+u8 tusb_get_revision(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 die_id;
+ u8 rev;
+
+ rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
+ if (TUSB_REV_MAJOR(rev) == 3) {
+ die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
+ TUSB_DIDR1_HI));
+ if (die_id >= TUSB_DIDR1_HI_REV_31)
+ rev |= 1;
+ }
+
+ return rev;
+}
+
+static int __init tusb_print_revision(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u8 rev;
+
+ rev = tusb_get_revision(musb);
+
+ pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
+ "prcm",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
+ "int",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+ "gpio",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
+ "dma",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+ "dieid",
+ TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
+ "rev",
+ TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
+
+	return rev;
+}
+
+#define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
+ | TUSB_PHY_OTG_CTRL_TESTM0)
+
+/*
+ * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
+ * Disables power detection in PHY for the duration of idle.
+ */
+static void tusb_wbus_quirk(struct musb *musb, int enabled)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ static u32 phy_otg_ctrl, phy_otg_ena;
+ u32 tmp;
+
+ if (enabled) {
+ phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
+ | phy_otg_ena | WBUS_QUIRK_MASK;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+ tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
+ tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+ DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+ } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
+ & TUSB_PHY_OTG_CTRL_TESTM2) {
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+ DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+ phy_otg_ctrl = 0;
+ phy_otg_ena = 0;
+ }
+}
+
+/*
+ * TUSB 6010 may use a parallel bus that doesn't support byte ops;
+ * so both loading and unloading FIFOs need explicit byte counts.
+ */
+
+static inline void
+tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
+{
+ u32 val;
+ int i;
+
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ memcpy(&val, buf, 4);
+ musb_writel(fifo, 0, val);
+ buf += 4;
+ }
+ len %= 4;
+ }
+ if (len > 0) {
+		/* Write the remaining 1 - 3 bytes to the FIFO */
+ memcpy(&val, buf, len);
+ musb_writel(fifo, 0, val);
+ }
+}
+
+static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
+ void __iomem *buf, u16 len)
+{
+ u32 val;
+ int i;
+
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ val = musb_readl(fifo, 0);
+ memcpy(buf, &val, 4);
+ buf += 4;
+ }
+ len %= 4;
+ }
+ if (len > 0) {
+		/* Read the remaining 1 - 3 bytes from the FIFO */
+ val = musb_readl(fifo, 0);
+ memcpy(buf, &val, len);
+ }
+}
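+
+/*
+ * Worked example (illustration only): for an unaligned 7-byte buffer,
+ * tusb_fifo_write_unaligned(fifo, buf, 7) issues one full 32-bit FIFO
+ * write for buf[0..3] (7 >> 2 == 1 loop pass), reduces len to
+ * 7 % 4 == 3, and then writes the remaining 3 bytes padded up to a
+ * final 32-bit word.
+ */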
+
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+{
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+
+ prefetch(buf);
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', epnum, fifo, len, buf);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
+ TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+ /* Best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) buf) == 0) {
+ if (len >= 4) {
+ writesl(fifo, buf, len >> 2);
+ buf += (len & ~0x03);
+ len &= 0x03;
+ }
+ } else {
+ if (len >= 2) {
+ u32 val;
+ int i;
+
+ /* Cannot use writesw, fifo is 32-bit */
+ for (i = 0; i < (len >> 2); i++) {
+ val = (u32)(*(u16 *)buf);
+ buf += 2;
+ val |= (*(u16 *)buf) << 16;
+ buf += 2;
+ musb_writel(fifo, 0, val);
+ }
+ len &= 0x03;
+ }
+ }
+ }
+
+ if (len > 0)
+ tusb_fifo_write_unaligned(fifo, buf, len);
+}
+
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+{
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', epnum, fifo, len, buf);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+ /* Best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) buf) == 0) {
+ if (len >= 4) {
+ readsl(fifo, buf, len >> 2);
+ buf += (len & ~0x03);
+ len &= 0x03;
+ }
+ } else {
+ if (len >= 2) {
+ u32 val;
+ int i;
+
+ /* Cannot use readsw, fifo is 32-bit */
+ for (i = 0; i < (len >> 2); i++) {
+ val = musb_readl(fifo, 0);
+ *(u16 *)buf = (u16)(val & 0xffff);
+ buf += 2;
+ *(u16 *)buf = (u16)(val >> 16);
+ buf += 2;
+ }
+ len &= 0x03;
+ }
+ }
+ }
+
+ if (len > 0)
+ tusb_fifo_read_unaligned(fifo, buf, len);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* This is used by gadget drivers, and OTG transceiver logic, allowing
+ * at most mA current to be drawn from VBUS during a Default-B session
+ * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
+ * mode), or low power Default-B sessions, something else supplies power.
+ * Caller must take care of locking.
+ */
+static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
+{
+ struct musb *musb = container_of(x, struct musb, xceiv);
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ /*
+ * Keep clock active when enabled. Note that this is not tied to
+ * drawing VBUS, since with OTG, mA can be less than musb->min_power.
+ */
+ if (musb->set_clock) {
+ if (mA)
+ musb->set_clock(musb->clock, 1);
+ else
+ musb->set_clock(musb->clock, 0);
+ }
+
+ /* tps65030 seems to consume max 100mA, with maybe 60mA available
+ * (measured on one board) for things other than tps and tusb.
+ *
+ * Boards sharing the CPU clock with CLKIN will need to prevent
+ * certain idle sleep states while the USB link is active.
+ *
+ * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
+ * The actual current usage would be very board-specific. For now,
+ * it's simpler to just use an aggregate (also board-specific).
+ */
+ if (x->default_a || mA < (musb->min_power << 1))
+ mA = 0;
+
+ reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ if (mA) {
+ musb->is_bus_powered = 1;
+ reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
+ } else {
+ musb->is_bus_powered = 0;
+ reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+ }
+ musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+ DBG(2, "draw max %d mA VBUS\n", mA);
+ return 0;
+}
+
+#else
+#define tusb_draw_power NULL
+#endif
+
+/* workaround for issue 13: change clock during chip idle
+ * (to be fixed in rev3 silicon) ... symptoms include disconnect
+ * or looping suspend/resume cycles
+ */
+static void tusb_set_clock_source(struct musb *musb, unsigned mode)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ reg = musb_readl(tbase, TUSB_PRCM_CONF);
+ reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);
+
+ /* 0 = refclk (clkin, XI)
+ * 1 = PHY 60 MHz (internal PLL)
+ * 2 = not supported
+ * 3 = what?
+ */
+ if (mode > 0)
+ reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);
+
+ musb_writel(tbase, TUSB_PRCM_CONF, reg);
+
+ /* FIXME tusb6010_platform_retime(mode == 0); */
+}
+
+/*
+ * Idle TUSB6010 until next wake-up event; NOR access always wakes.
+ * Other code ensures that we idle unless we're connected _and_ the
+ * USB link is not suspended ... and tells us the relevant wakeup
+ * events. SW_EN for voltage is handled separately.
+ */
+void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ if ((wakeup_enables & TUSB_PRCM_WBUS)
+ && (tusb_get_revision(musb) == TUSB_REV_30))
+ tusb_wbus_quirk(musb, 1);
+
+ tusb_set_clock_source(musb, 0);
+
+ wakeup_enables |= TUSB_PRCM_WNORCS;
+ musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
+
+ /* REVISIT writeup of WID implies that if WID set and ID is grounded,
+ * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
+ * Presumably that's mostly to save power, hence WID is immaterial ...
+ */
+
+ reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ /* issue 4: when driving vbus, use hipower (vbus_det) comparator */
+ if (is_host_active(musb)) {
+ reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+ } else {
+ reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+ reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ }
+ reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
+ musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+ DBG(6, "idle, wake on %02x\n", wakeup_enables);
+}
+
+/*
+ * Updates cable VBUS status. Caller must take care of locking.
+ */
+int musb_platform_get_vbus_status(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 otg_stat, prcm_mngmt;
+ int ret = 0;
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
+
+ /* Temporarily enable VBUS detection if it was disabled for
+ * suspend mode. Unless it's enabled, otg_stat and devctl will
+ * not show the correct VBUS state.
+ */
+ if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
+ u32 tmp = prcm_mngmt;
+ tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
+ }
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
+ ret = 1;
+
+ return ret;
+}
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_BCON:
+ if ((musb->a_wait_bcon != 0)
+ && (musb->idle_timeout == 0
+ || time_after(jiffies, musb->idle_timeout))) {
+ DBG(4, "Nothing connected %s, turning off VBUS\n",
+ otg_state_string(musb));
+ }
+ /* FALLTHROUGH */
+ case OTG_STATE_A_IDLE:
+ tusb_source_power(musb, 0);
+ default:
+ break;
+ }
+
+ if (!musb->is_active) {
+ u32 wakeups;
+
+ /* wait until khubd handles port change status */
+ if (is_host_active(musb) && (musb->port1_status >> 16))
+ goto done;
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_peripheral_enabled(musb) && !musb->gadget_driver)
+ wakeups = 0;
+ else {
+ wakeups = TUSB_PRCM_WHOSTDISCON
+ | TUSB_PRCM_WBUS
+ | TUSB_PRCM_WVBUS;
+ if (is_otg_enabled(musb))
+ wakeups |= TUSB_PRCM_WID;
+ }
+#else
+ wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
+#endif
+ tusb_allow_idle(musb, wakeups);
+ }
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/*
+ * Maybe put TUSB6010 into idle mode depending on USB link status,
+ * like "disconnected" or "suspended". We'll be woken out of it by
+ * connect, resume, or disconnect.
+ *
+ * Needs to be called as the last function wherever there is
+ * register access to TUSB6010, because of NOR flash wake-up.
+ * Caller should own controller spinlock.
+ *
+ * Delay because peripheral enables D+ pullup 3msec after SE0, and
+ * we don't want to treat that full speed J as a wakeup event.
+ * ... peripherals must draw only suspend current after 10 msec.
+ */
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+ unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = default_timeout;
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+ DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+ del_timer(&musb_idle_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&musb_idle_timer))
+ last_timer = timeout;
+ else {
+ DBG(4, "Longer idle timer already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+ DBG(4, "%s inactive, for idle timer for %lu ms\n",
+ otg_state_string(musb),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb_idle_timer, timeout);
+}
+
+/* ticks of 60 MHz clock */
+#define DEVCLOCK 60000000
+#define OTG_TIMER_MS(msecs) ((msecs) \
+ ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
+ | TUSB_DEV_OTG_TIMER_ENABLE) \
+ : 0)
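+
+/*
+ * Worked example (illustration only): with the 60 MHz device clock,
+ * OTG_TIMER_MS(100) expands to
+ *	TUSB_DEV_OTG_TIMER_VAL(60000 * 100) | TUSB_DEV_OTG_TIMER_ENABLE
+ * i.e. 6,000,000 ticks with the enable bit set, while OTG_TIMER_MS(0)
+ * simply leaves the timer disabled.
+ */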
+
+static void tusb_source_power(struct musb *musb, int is_on)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 conf, prcm, timer;
+ u8 devctl;
+
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ conf = musb_readl(tbase, TUSB_DEV_CONF);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ conf |= TUSB_DEV_CONF_USB_HOST_MODE;
+ MUSB_HST_MODE(musb);
+ } else {
+ u32 otg_stat;
+
+ timer = 0;
+
+ /* If ID pin is grounded, we want to be a_idle */
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_BCON:
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ }
+ musb->is_active = 0;
+ musb->xceiv.default_a = 1;
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ }
+
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ }
+ prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+
+ musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
+ musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
+ musb_writel(tbase, TUSB_DEV_CONF, conf);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
+ otg_state_string(musb),
+ musb_readb(musb->mregs, MUSB_DEVCTL),
+ musb_readl(tbase, TUSB_DEV_OTG_STAT),
+ conf, prcm);
+}
+
+/*
+ * Sets the mode to OTG, peripheral or host by changing the ID detection.
+ * Caller must take care of locking.
+ *
+ * Note that if a mini-A cable is plugged in the ID line will stay down as
+ * the weak ID pull-up is not able to pull the ID up.
+ *
+ * REVISIT: It would be possible to add support for changing between host
+ * and peripheral modes in non-OTG configurations by reconfiguring hardware
+ * and then setting musb->board_mode. For now, only support OTG mode.
+ */
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
+
+ if (musb->board_mode != MUSB_OTG) {
+ ERR("Changing mode currently only supported in OTG mode\n");
+ return;
+ }
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
+
+ switch (musb_mode) {
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case MUSB_HOST: /* Disable PHY ID detect, ground ID */
+ phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf |= TUSB_DEV_CONF_ID_SEL;
+ dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
+ break;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
+ phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+ break;
+#endif
+
+#ifdef CONFIG_USB_MUSB_OTG
+ case MUSB_OTG: /* Use PHY ID detection */
+ phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+ break;
+#endif
+
+ default:
+ DBG(2, "Trying to set unknown mode %i\n", musb_mode);
+ }
+
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL,
+ TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
+ TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
+ musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ if ((musb_mode == MUSB_PERIPHERAL) &&
+ !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
+ INFO("Cannot be peripheral with mini-A cable "
+ "otg_stat: %08x\n", otg_stat);
+}
+
+static inline unsigned long
+tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
+{
+ u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ unsigned long idle_timeout = 0;
+
+ /* ID pin */
+ if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
+ int default_a;
+
+ if (is_otg_enabled(musb))
+ default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
+ else
+ default_a = is_host_enabled(musb);
+ DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
+ musb->xceiv.default_a = default_a;
+ tusb_source_power(musb, default_a);
+
+ /* Don't allow idling immediately */
+ if (default_a)
+ idle_timeout = jiffies + (HZ * 3);
+ }
+
+ /* VBUS state change */
+ if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
+
+ /* B-dev state machine: no vbus ~= disconnect */
+ if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
+ || !is_host_enabled(musb)) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* ? musb_root_disconnect(musb); */
+ musb->port1_status &=
+ ~(USB_PORT_STAT_CONNECTION
+ | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED
+ | USB_PORT_STAT_HIGH_SPEED
+ | USB_PORT_STAT_TEST
+ );
+#endif
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
+ DBG(1, "Forcing disconnect (no interrupt)\n");
+ if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+ /* INTR_DISCONNECT can hide... */
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ }
+ musb->is_active = 0;
+ }
+ DBG(2, "vbus change, %s, otg %03x\n",
+ otg_state_string(musb), otg_stat);
+ idle_timeout = jiffies + (1 * HZ);
+ schedule_work(&musb->irq_work);
+
+ } else /* A-dev state machine */ {
+ DBG(2, "vbus change, %s, otg %03x\n",
+ otg_state_string(musb), otg_stat);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_IDLE:
+ DBG(2, "Got SRP, turning on VBUS\n");
+ musb_set_vbus(musb, 1);
+
+ /* CONNECT can wake if a_wait_bcon is set */
+ if (musb->a_wait_bcon != 0)
+ musb->is_active = 0;
+ else
+ musb->is_active = 1;
+
+ /*
+ * OPT FS A TD.4.6 needs few seconds for
+ * A_WAIT_VRISE
+ */
+ idle_timeout = jiffies + (2 * HZ);
+
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ /* ignore; A-session-valid < VBUS_VALID/2,
+ * we monitor this with the timer
+ */
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /* REVISIT this irq triggers during short
+ * spikes caused by enumeration ...
+ */
+ if (musb->vbuserr_retry) {
+ musb->vbuserr_retry--;
+ tusb_source_power(musb, 1);
+ } else {
+ musb->vbuserr_retry
+ = VBUSERR_RETRY_COUNT;
+ tusb_source_power(musb, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ /* OTG timer expiration */
+ if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
+ u8 devctl;
+
+ DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ /* VBUS has probably been valid for a while now,
+ * but may well have bounced out of range a bit
+ */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ != MUSB_DEVCTL_VBUS) {
+ DBG(2, "devctl %02x\n", devctl);
+ break;
+ }
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ idle_timeout = jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon);
+ } else {
+ /* REVISIT report overcurrent to hub? */
+ ERR("vbus too slow, devctl %02x\n", devctl);
+ tusb_source_power(musb, 0);
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ idle_timeout = jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon);
+ break;
+ case OTG_STATE_A_SUSPEND:
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ break;
+ default:
+ break;
+ }
+ }
+ schedule_work(&musb->irq_work);
+
+ return idle_timeout;
+}
+
+static irqreturn_t tusb_interrupt(int irq, void *__hci)
+{
+ struct musb *musb = __hci;
+ void __iomem *tbase = musb->ctrl_base;
+ unsigned long flags, idle_timeout = 0;
+ u32 int_mask, int_src;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Mask all interrupts to allow using both edge and level GPIO irq */
+ int_mask = musb_readl(tbase, TUSB_INT_MASK);
+ musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+
+ int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
+ DBG(3, "TUSB IRQ %08x\n", int_src);
+
+ musb->int_usb = (u8) int_src;
+
+ /* Acknowledge wake-up source interrupts */
+ if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
+ u32 reg;
+ u32 i;
+
+ if (tusb_get_revision(musb) == TUSB_REV_30)
+ tusb_wbus_quirk(musb, 0);
+
+ /* there are issues re-locking the PLL on wakeup ... */
+
+ /* work around issue 8 */
+ for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
+ musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
+ musb_writel(tbase, TUSB_SCRATCH_PAD, i);
+ reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
+ if (reg == i)
+ break;
+ DBG(6, "TUSB NOR not ready\n");
+ }
+
+ /* work around issue 13 (2nd half) */
+ tusb_set_clock_source(musb, 1);
+
+ reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
+ musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
+ if (reg & ~TUSB_PRCM_WNORCS) {
+ musb->is_active = 1;
+ schedule_work(&musb->irq_work);
+ }
+ DBG(3, "wake %sactive %02x\n",
+ musb->is_active ? "" : "in", reg);
+
+ /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
+ }
+
+ if (int_src & TUSB_INT_SRC_USB_IP_CONN)
+ del_timer(&musb_idle_timer);
+
+ /* OTG state change reports (annoyingly) not issued by Mentor core */
+ if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
+ | TUSB_INT_SRC_OTG_TIMEOUT
+ | TUSB_INT_SRC_ID_STATUS_CHNG))
+ idle_timeout = tusb_otg_ints(musb, int_src, tbase);
+
+ /* TX dma callback must be handled here, RX dma callback is
+ * handled in tusb_omap_dma_cb.
+ */
+ if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
+ u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
+ u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
+
+ DBG(3, "DMA IRQ %08x\n", dma_src);
+ real_dma_src = ~real_dma_src & dma_src;
+ if (tusb_dma_omap() && real_dma_src) {
+ int tx_source = (real_dma_src & 0xffff);
+ int i;
+
+ for (i = 1; i <= 15; i++) {
+ if (tx_source & (1 << i)) {
+ DBG(3, "completing ep%i %s\n", i, "tx");
+ musb_dma_completion(musb, i, 1);
+ }
+ }
+ }
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
+ }
+
+ /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
+ u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
+
+ musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
+ musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
+ musb->int_tx = (musb_src & 0xffff);
+ } else {
+ musb->int_rx = 0;
+ musb->int_tx = 0;
+ }
+
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
+ musb_interrupt(musb);
+
+ /* Acknowledge TUSB interrupts. Clear only non-reserved bits */
+ musb_writel(tbase, TUSB_INT_SRC_CLEAR,
+ int_src & ~TUSB_INT_MASK_RESERVED_BITS);
+
+ musb_platform_try_idle(musb, idle_timeout);
+
+ musb_writel(tbase, TUSB_INT_MASK, int_mask);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int dma_off;
+
+/*
+ * Enables TUSB6010. Caller must take care of locking.
+ * REVISIT:
+ * - Check what is unnecessary in MGC_HdrcStart()
+ */
+void musb_platform_enable(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
+ * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
+ musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
+
+ /* Setup TUSB interrupt, disable DMA and GPIO interrupts */
+ musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
+ musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+	/* Clear all subsystem interrupts */
+ musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
+
+ /* Acknowledge pending interrupt(s) */
+ musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
+
+	/* Only 0 clock cycles for minimum interrupt de-assertion time and
+	 * active-low interrupt polarity seem to work reliably here */
+ musb_writel(tbase, TUSB_INT_CTRL_CONF,
+ TUSB_INT_CTRL_CONF_INT_RELCYC(0));
+
+ set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+
+ /* maybe force into the Default-A OTG state machine */
+ if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
+ & TUSB_DEV_OTG_STAT_ID_STATUS))
+ musb_writel(tbase, TUSB_INT_SRC_SET,
+ TUSB_INT_SRC_ID_STATUS_CHNG);
+
+ if (is_dma_capable() && dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __func__);
+ else
+ dma_off = 1;
+}
+
+/*
+ * Disables TUSB6010. Caller must take care of locking.
+ */
+void musb_platform_disable(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /* FIXME stop DMA, IRQs, timers, ... */
+
+ /* disable all IRQs */
+ musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+ musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+ del_timer(&musb_idle_timer);
+
+ if (is_dma_capable() && !dma_off) {
+ printk(KERN_WARNING "%s %s: dma still active\n",
+ __FILE__, __func__);
+ dma_off = 1;
+ }
+}
+
+/*
+ * Sets up TUSB6010 CPU interface specific signals and registers
+ * Note: Settings optimized for OMAP24xx
+ */
+static void __init tusb_setup_cpu_interface(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /*
+ * Disable GPIO[5:0] pullups (used as output DMA requests)
+ * Don't disable GPIO[7:6] as they are needed for wake-up.
+ */
+ musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
+
+ /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
+ musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
+
+ /* Turn GPIO[5:0] to DMAREQ[5:0] signals */
+ musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
+
+ /* Burst size 16x16 bits, all six DMA requests enabled, DMA request
+ * de-assertion time 2 system clocks p 62 */
+ musb_writel(tbase, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ /* Set 0 wait count for synchronous burst access */
+ musb_writel(tbase, TUSB_WAIT_COUNT, 1);
+}
+
+static int __init tusb_start(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ int ret = 0;
+ unsigned long flags;
+ u32 reg;
+
+ if (musb->board_set_power)
+ ret = musb->board_set_power(1);
+ if (ret != 0) {
+ printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
+ TUSB_PROD_TEST_RESET_VAL) {
+ printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
+ goto err;
+ }
+
+ ret = tusb_print_revision(musb);
+ if (ret < 2) {
+ printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
+ ret);
+ goto err;
+ }
+
+ /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
+ * NOR FLASH interface is used */
+ musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
+
+ /* Select PHY free running 60MHz as a system clock */
+ tusb_set_clock_source(musb, 1);
+
+ /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
+ * power saving, enable VBus detect and session end comparators,
+ * enable IDpullup, enable VBus charging */
+ musb_writel(tbase, TUSB_PRCM_MNGMT,
+ TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
+ TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
+ TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
+ TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
+ TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
+ tusb_setup_cpu_interface(musb);
+
+ /* simplify: always sense/pullup ID pins, as if in OTG mode */
+ reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
+
+ reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ return -ENODEV;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ struct platform_device *pdev;
+ struct resource *mem;
+ void __iomem *sync;
+ int ret;
+
+ pdev = to_platform_device(musb->controller);
+
+ /* dma address for async dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ musb->async = mem->start;
+
+ /* dma address for sync dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem) {
+ pr_debug("no sync dma resource?\n");
+ return -ENODEV;
+ }
+ musb->sync = mem->start;
+
+ sync = ioremap(mem->start, mem->end - mem->start + 1);
+ if (!sync) {
+ pr_debug("ioremap for sync failed\n");
+ return -ENOMEM;
+ }
+ musb->sync_va = sync;
+
+ /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
+ * FIFOs at 0x600, TUSB at 0x800
+ */
+ musb->mregs += TUSB_BASE_OFFSET;
+
+ ret = tusb_start(musb);
+ if (ret) {
+ printk(KERN_ERR "Could not start tusb6010 (%d)\n",
+ ret);
+ return -ENODEV;
+ }
+ musb->isr = tusb_interrupt;
+
+ if (is_host_enabled(musb))
+ musb->board_set_vbus = tusb_source_power;
+ if (is_peripheral_enabled(musb))
+ musb->xceiv.set_power = tusb_draw_power;
+
+ setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+ return ret;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+ del_timer_sync(&musb_idle_timer);
+
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ iounmap(musb->sync_va);
+
+ return 0;
+}
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
new file mode 100644
index 000000000000..ab8c96286ce6
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.h
@@ -0,0 +1,233 @@
+/*
+ * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TUSB6010_H__
+#define __TUSB6010_H__
+
+extern u8 tusb_get_revision(struct musb *musb);
+
+#ifdef CONFIG_USB_TUSB6010
+#define musb_in_tusb() 1
+#else
+#define musb_in_tusb() 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap() 1
+#else
+#define tusb_dma_omap() 0
+#endif
+
+/* VLYNQ control register. 32-bit at offset 0x000 */
+#define TUSB_VLYNQ_CTRL 0x004
+
+/* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */
+#define TUSB_BASE_OFFSET 0x400
+
+/* FIFO registers 32-bit at offset 0x600 */
+#define TUSB_FIFO_BASE 0x600
+
+/* Device System & Control registers. 32-bit at offset 0x800 */
+#define TUSB_SYS_REG_BASE 0x800
+
+#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000)
+#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16)
+#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15)
+#define TUSB_DEV_CONF_SOFT_ID (1 << 1)
+#define TUSB_DEV_CONF_ID_SEL (1 << 0)
+
+#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004)
+#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008)
+#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24)
+#define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23)
+#define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19)
+#define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18)
+#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17)
+#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16)
+#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15)
+#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14)
+#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13)
+#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12)
+#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11)
+#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10)
+#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9)
+#define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7)
+#define TUSB_PHY_OTG_CTRL_PD (1 << 6)
+#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5)
+#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4)
+#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3)
+#define TUSB_PHY_OTG_CTRL_RESET (1 << 2)
+#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1)
+#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0)
+
+/* OTG status register */
+#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c)
+#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8)
+#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7)
+#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6)
+#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5)
+#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4)
+#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3)
+#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2)
+#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0)
+#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1)
+#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0)
+
+#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010)
+# define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31)
+# define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff)
+#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014)
+
+/* PRCM configuration register */
+#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018)
+#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24)
+#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16)
+
+/* PRCM management register */
+#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c)
+#define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25)
+#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19)
+#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18)
+#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17)
+#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10)
+#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9)
+#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8)
+#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4)
+#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3)
+#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2)
+#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1)
+#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0)
+
+/* Wake-up source clear and mask registers */
+#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020)
+#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028)
+#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c)
+#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13)
+#define TUSB_PRCM_WGPIO_7 (1 << 12)
+#define TUSB_PRCM_WGPIO_6 (1 << 11)
+#define TUSB_PRCM_WGPIO_5 (1 << 10)
+#define TUSB_PRCM_WGPIO_4 (1 << 9)
+#define TUSB_PRCM_WGPIO_3 (1 << 8)
+#define TUSB_PRCM_WGPIO_2 (1 << 7)
+#define TUSB_PRCM_WGPIO_1 (1 << 6)
+#define TUSB_PRCM_WGPIO_0 (1 << 5)
+#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */
+#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */
+#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */
+#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */
+#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */
+
+#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030)
+#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034)
+#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038)
+#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c)
+#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040)
+#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044)
+#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048)
+#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c)
+#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050)
+#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054)
+#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058)
+#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c)
+#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060)
+#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064)
+#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068)
+#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c)
+
+/* NOR flash interrupt source registers */
+#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070)
+#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074)
+#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078)
+#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c)
+#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24)
+#define TUSB_INT_SRC_USB_IP_CORE (1 << 17)
+#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16)
+#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15)
+#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14)
+#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13)
+#define TUSB_INT_SRC_DEV_READY (1 << 12)
+#define TUSB_INT_SRC_USB_IP_TX (1 << 9)
+#define TUSB_INT_SRC_USB_IP_RX (1 << 8)
+#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7)
+#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6)
+#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5)
+#define TUSB_INT_SRC_USB_IP_CONN (1 << 4)
+#define TUSB_INT_SRC_USB_IP_SOF (1 << 3)
+#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2)
+#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1)
+#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0)
+
+/* NOR flash interrupt registers reserved bits. Must be written as 0 */
+#define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17)
+#define TUSB_INT_MASK_RESERVED_13 (1 << 13)
+#define TUSB_INT_MASK_RESERVED_8 (0xf << 8)
+#define TUSB_INT_SRC_RESERVED_26 (0x1f << 26)
+#define TUSB_INT_SRC_RESERVED_18 (0x3f << 18)
+#define TUSB_INT_SRC_RESERVED_10 (0x03 << 10)
+
+/* Reserved bits for NOR flash interrupt mask and clear register */
+#define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \
+ TUSB_INT_MASK_RESERVED_13 | \
+ TUSB_INT_MASK_RESERVED_8)
+
+/* Reserved bits for NOR flash interrupt status register */
+#define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \
+ TUSB_INT_SRC_RESERVED_18 | \
+ TUSB_INT_SRC_RESERVED_10)
+
+#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080)
+#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084)
+#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100)
+#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104)
+#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108)
+#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148)
+
+/* Offsets from each ep base register */
+#define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */
+#define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */
+#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188
+
+#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8)
+#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4)
+#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8)
+
+/* Device System & Control register bitfields */
+#define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18)
+#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17)
+#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16)
+#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24)
+#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16)
+#define TUSB_EP0_CONFIG_SW_EN (1 << 8)
+#define TUSB_EP0_CONFIG_DIR_TX (1 << 7)
+#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f)
+#define TUSB_EP_CONFIG_SW_EN (1 << 31)
+#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff)
+#define TUSB_PROD_TEST_RESET_VAL 0xa596
+#define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20)
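+
+/*
+ * Worked example (illustration only): endpoint 2's FIFO lives at
+ * TUSB_EP_FIFO(2) == 0x600 + 2 * 0x20 == 0x640, and a 512 byte
+ * transfer is announced as TUSB_EP_CONFIG_XFR_SIZE(512) before the
+ * FIFO is loaded (see musb_write_fifo() in tusb6010.c).
+ */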
+
+#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8)
+#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc)
+#define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf)
+#define TUSB_DIDR1_HI_REV_20 0
+#define TUSB_DIDR1_HI_REV_30 1
+#define TUSB_DIDR1_HI_REV_31 2
+
+#define TUSB_REV_10 0x10
+#define TUSB_REV_20 0x20
+#define TUSB_REV_30 0x30
+#define TUSB_REV_31 0x31
+
+#endif /* __TUSB6010_H__ */
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
new file mode 100644
index 000000000000..52f7f29cebda
--- /dev/null
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -0,0 +1,719 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+
+#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
+
+#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */
+
+struct tusb_omap_dma_ch {
+ struct musb *musb;
+ void __iomem *tbase;
+ unsigned long phys_offset;
+ int epnum;
+ u8 tx;
+ struct musb_hw_ep *hw_ep;
+
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+
+ struct tusb_omap_dma *tusb_dma;
+
+ void __iomem *dma_addr;
+
+ u32 len;
+ u16 packet_sz;
+ u16 transfer_packet_sz;
+ u32 transfer_len;
+ u32 completed_len;
+};
+
+struct tusb_omap_dma {
+ struct dma_controller controller;
+ struct musb *musb;
+ void __iomem *tbase;
+
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+ unsigned multichannel:1;
+};
+
+static int tusb_omap_dma_start(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+ /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+ return 0;
+}
+
+static int tusb_omap_dma_stop(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+ /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+ return 0;
+}
+
+/*
+ * Allocate dmareq0 to the current channel unless it's already taken
+ */
+static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+ if (reg != 0) {
+ DBG(3, "ep%i dmareq0 is busy for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return -EAGAIN;
+ }
+
+ if (chdat->tx)
+ reg = (1 << 4) | chdat->epnum;
+ else
+ reg = chdat->epnum;
+
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ return 0;
+}
+
+static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+ if ((reg & 0xf) != chdat->epnum) {
+ printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return;
+ }
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
+}
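+
+/*
+ * Illustrative note (derived from the two helpers above, not from the
+ * datasheet): TUSB_DMA_EP_MAP records the owner of the shared dmareq0
+ * line as a TX flag in bit 4 plus the endpoint number in the low
+ * nibble, e.g.
+ *	(1 << 4) | 3 == 0x13	ep3 TX owns dmareq0
+ *	2 == 0x02		ep2 RX owns dmareq0
+ * and 0 means the shared request line is free.
+ */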
+
+/*
+ * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
+ * musb_gadget.c.
+ */
+static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+{
+ struct dma_channel *channel = (struct dma_channel *)data;
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *mbase = musb->mregs;
+ unsigned long remaining, flags, pio;
+ int ch;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (tusb_dma->multichannel)
+ ch = chdat->ch;
+ else
+ ch = tusb_dma->ch;
+
+ if (ch_status != OMAP_DMA_BLOCK_IRQ)
+ printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
+
+ DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ ch, ch_status);
+
+ if (chdat->tx)
+ remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);
+
+ /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
+ if (unlikely(remaining > chdat->transfer_len)) {
+ DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
+ chdat->tx ? "tx" : "rx", chdat->ch,
+ remaining);
+ remaining = 0;
+ }
+
+ channel->actual_len = chdat->transfer_len - remaining;
+ pio = chdat->len - channel->actual_len;
+
+ DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
+
+ /* Transfer remaining 1 - 31 bytes */
+ if (pio > 0 && pio < 32) {
+ u8 *buf;
+
+ DBG(3, "Using PIO for remaining %lu bytes\n", pio);
+ buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
+ if (chdat->tx) {
+ dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+ chdat->transfer_len, DMA_TO_DEVICE);
+ musb_write_fifo(hw_ep, pio, buf);
+ } else {
+ musb_read_fifo(hw_ep, pio, buf);
+ dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+ chdat->transfer_len, DMA_FROM_DEVICE);
+ }
+ channel->actual_len += pio;
+ }
+
+ if (!tusb_dma->multichannel)
+ tusb_omap_free_shared_dmareq(chdat);
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ /* Handle only RX callbacks here. TX callbacks must be handled based
+ * on the TUSB DMA status interrupt.
+ * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
+ * interrupt for RX and TX.
+ */
+ if (!chdat->tx)
+ musb_dma_completion(musb, chdat->epnum, chdat->tx);
+
+ /* We must terminate short tx transfers manually by setting TXPKTRDY.
+ * REVISIT: This same problem may occur with other MUSB dma as well.
+ * Easy to test with g_ether by pinging the MUSB board with ping -s54.
+ */
+ if ((chdat->transfer_len < chdat->packet_sz)
+ || (chdat->transfer_len % chdat->packet_sz != 0)) {
+ u16 csr;
+
+ if (chdat->tx) {
+ DBG(3, "terminating short tx packet\n");
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ }
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
+ u8 rndis_mode, dma_addr_t dma_addr, u32 len)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *ep_conf = hw_ep->conf;
+ dma_addr_t fifo = hw_ep->fifo_sync;
+ struct omap_dma_channel_params dma_params;
+ u32 dma_remaining;
+ int src_burst, dst_burst;
+ u16 csr;
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+
+ if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
+ return false;
+
+ /*
+ * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
+ * register, which will cause a missed DMA interrupt. We could try to
+ * use a timer for the callback, but it is unsafe as the XFR_SIZE
+ * register is corrupt, and we won't know if the DMA worked.
+ */
+ if (dma_addr & 0x2)
+ return false;
+
+ /*
+ * Because of HW issue #10, it seems like mixing sync DMA and async
+ * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
+ * using the channel for DMA.
+ */
+ if (chdat->tx)
+ dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
+ if (dma_remaining) {
+ DBG(2, "Busy %s dma ch%i, not using: %08x\n",
+ chdat->tx ? "tx" : "rx", chdat->ch,
+ dma_remaining);
+ return false;
+ }
+
+ chdat->transfer_len = len & ~0x1f;
+
+ if (len < packet_sz)
+ chdat->transfer_packet_sz = chdat->transfer_len;
+ else
+ chdat->transfer_packet_sz = packet_sz;
+
+ if (tusb_dma->multichannel) {
+ ch = chdat->ch;
+ dmareq = chdat->dmareq;
+ sync_dev = chdat->sync_dev;
+ } else {
+ if (tusb_omap_use_shared_dmareq(chdat) != 0) {
+ DBG(3, "could not get dma for ep%i\n", chdat->epnum);
+ return false;
+ }
+ if (tusb_dma->ch < 0) {
+ /* REVISIT: This should get blocked earlier; it happens
+ * with the MSC ErrorRecoveryTest
+ */
+ WARN_ON(1);
+ return false;
+ }
+
+ ch = tusb_dma->ch;
+ dmareq = tusb_dma->dmareq;
+ sync_dev = tusb_dma->sync_dev;
+ omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
+ }
+
+ chdat->packet_sz = packet_sz;
+ chdat->len = len;
+ channel->actual_len = 0;
+ chdat->dma_addr = (void __iomem *)dma_addr;
+ channel->status = MUSB_DMA_STATUS_BUSY;
+
+ /* Since we're recycling dma areas, we need to clean or invalidate */
+ if (chdat->tx)
+ dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE);
+ else
+ dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE);
+
+ /* Use 16-bit transfer if dma_addr is not 32-bit aligned */
+ if ((dma_addr & 0x3) == 0) {
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.elem_count = 8; /* Elements in frame */
+ } else {
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
+ dma_params.elem_count = 16; /* Elements in frame */
+ fifo = hw_ep->fifo_async;
+ }
+
+ dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */
+
+ DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ ch, dma_addr, chdat->transfer_len, len,
+ chdat->transfer_packet_sz, packet_sz);
+
+ /*
+ * Prepare omap DMA for transfer
+ */
+ if (chdat->tx) {
+ dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.src_start = (unsigned long)dma_addr;
+ dma_params.src_ei = 0;
+ dma_params.src_fi = 0;
+
+ dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.dst_start = (unsigned long)fifo;
+ dma_params.dst_ei = 1;
+ dma_params.dst_fi = -31; /* Loop 32 byte window */
+
+ dma_params.trigger = sync_dev;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_or_dst_synch = 0; /* Dest sync */
+
+ src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */
+ dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */
+ } else {
+ dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.src_start = (unsigned long)fifo;
+ dma_params.src_ei = 1;
+ dma_params.src_fi = -31; /* Loop 32 byte window */
+
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.dst_start = (unsigned long)dma_addr;
+ dma_params.dst_ei = 0;
+ dma_params.dst_fi = 0;
+
+ dma_params.trigger = sync_dev;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_or_dst_synch = 1; /* Source sync */
+
+ src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */
+ dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */
+ }
+
+ DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
+ ((dma_addr & 0x3) == 0) ? "sync" : "async",
+ dma_params.src_start, dma_params.dst_start);
+
+ omap_set_dma_params(ch, &dma_params);
+ omap_set_dma_src_burst_mode(ch, src_burst);
+ omap_set_dma_dest_burst_mode(ch, dst_burst);
+ omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+
+ /*
+ * Prepare MUSB for DMA transfer
+ */
+ if (chdat->tx) {
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ } else {
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_DMAENAB;
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
+ musb_writew(hw_ep->regs, MUSB_RXCSR,
+ csr | MUSB_RXCSR_P_WZC_BITS);
+ }
+
+ /*
+ * Start DMA transfer
+ */
+ omap_start_dma(ch);
+
+ if (chdat->tx) {
+ /* Send transfer_packet_sz packets at a time */
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+ chdat->transfer_packet_sz);
+
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+ } else {
+ /* Receive transfer_packet_sz packets at a time */
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+ chdat->transfer_packet_sz << 16);
+
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+ }
+
+ return true;
+}
+
+static int tusb_omap_dma_abort(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+
+ if (!tusb_dma->multichannel) {
+ if (tusb_dma->ch >= 0) {
+ omap_stop_dma(tusb_dma->ch);
+ omap_free_dma(tusb_dma->ch);
+ tusb_dma->ch = -1;
+ }
+
+ tusb_dma->dmareq = -1;
+ tusb_dma->sync_dev = -1;
+ }
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return 0;
+}
+
+static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+ int i, dmareq_nr = -1;
+
+ const int sync_dev[6] = {
+ OMAP24XX_DMA_EXT_DMAREQ0,
+ OMAP24XX_DMA_EXT_DMAREQ1,
+ OMAP242X_DMA_EXT_DMAREQ2,
+ OMAP242X_DMA_EXT_DMAREQ3,
+ OMAP242X_DMA_EXT_DMAREQ4,
+ OMAP242X_DMA_EXT_DMAREQ5,
+ };
+
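+ /* TUSB_DMA_EP_MAP packs one 5-bit field per dmareq: a 4-bit endpoint
+ * number plus a direction bit; a zero field means the dmareq is free.
+ */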
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ int cur = (reg & (0xf << (i * 5))) >> (i * 5);
+ if (cur == 0) {
+ dmareq_nr = i;
+ break;
+ }
+ }
+
+ if (dmareq_nr == -1)
+ return -EAGAIN;
+
+ reg |= (chdat->epnum << (dmareq_nr * 5));
+ if (chdat->tx)
+ reg |= ((1 << 4) << (dmareq_nr * 5));
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dmareq = dmareq_nr;
+ chdat->sync_dev = sync_dev[chdat->dmareq];
+
+ return 0;
+}
+
+static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg;
+
+ if (!chdat || chdat->dmareq < 0)
+ return;
+
+ reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+ reg &= ~(0x1f << (chdat->dmareq * 5));
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dmareq = -1;
+ chdat->sync_dev = -1;
+}
+
+static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
+
+static struct dma_channel *
+tusb_omap_dma_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep,
+ u8 tx)
+{
+ int ret, i;
+ const char *dev_name;
+ struct tusb_omap_dma *tusb_dma;
+ struct musb *musb;
+ void __iomem *tbase;
+ struct dma_channel *channel = NULL;
+ struct tusb_omap_dma_ch *chdat = NULL;
+ u32 reg;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ musb = tusb_dma->musb;
+ tbase = musb->ctrl_base;
+
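+ /* Unmask the TUSB DMA interrupt for this endpoint; the RX bits
+ * follow the TX bits, offset by 15.
+ */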
+ reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+ if (tx)
+ reg &= ~(1 << hw_ep->epnum);
+ else
+ reg &= ~(1 << (hw_ep->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+ /* REVISIT: Why does dmareq5 not work? */
+ if (hw_ep->epnum == 0) {
+ DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
+ return NULL;
+ }
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
+ ch->status = MUSB_DMA_STATUS_FREE;
+ channel = ch;
+ chdat = ch->private_data;
+ break;
+ }
+ }
+
+ if (!channel)
+ return NULL;
+
+ if (tx) {
+ chdat->tx = 1;
+ dev_name = "TUSB transmit";
+ } else {
+ chdat->tx = 0;
+ dev_name = "TUSB receive";
+ }
+
+ chdat->musb = tusb_dma->musb;
+ chdat->tbase = tusb_dma->tbase;
+ chdat->hw_ep = hw_ep;
+ chdat->epnum = hw_ep->epnum;
+ chdat->dmareq = -1;
+ chdat->completed_len = 0;
+ chdat->tusb_dma = tusb_dma;
+
+ channel->max_len = 0x7fffffff;
+ channel->desired_mode = 0;
+ channel->actual_len = 0;
+
+ if (tusb_dma->multichannel) {
+ ret = tusb_omap_dma_allocate_dmareq(chdat);
+ if (ret != 0)
+ goto free_dmareq;
+
+ ret = omap_request_dma(chdat->sync_dev, dev_name,
+ tusb_omap_dma_cb, channel, &chdat->ch);
+ if (ret != 0)
+ goto free_dmareq;
+ } else if (tusb_dma->ch == -1) {
+ tusb_dma->dmareq = 0;
+ tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
+
+ /* Callback data gets set later in the shared dmareq case */
+ ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
+ tusb_omap_dma_cb, NULL, &tusb_dma->ch);
+ if (ret != 0)
+ goto free_dmareq;
+
+ chdat->dmareq = -1;
+ chdat->ch = -1;
+ }
+
+ DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
+ chdat->epnum,
+ chdat->tx ? "tx" : "rx",
+ chdat->ch >= 0 ? "dedicated" : "shared",
+ chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
+ chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
+ chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
+
+ return channel;
+
+free_dmareq:
+ tusb_omap_dma_free_dmareq(chdat);
+
+ DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum);
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+ return NULL;
+}
+
+static void tusb_omap_dma_release(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct musb *musb = chdat->musb;
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch);
+
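+ /* Mask and then clear the TUSB DMA interrupt for this endpoint
+ * before freeing the channel.
+ */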
+ reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+ if (chdat->tx)
+ reg |= (1 << chdat->epnum);
+ else
+ reg |= (1 << (chdat->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+ reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR);
+ if (chdat->tx)
+ reg |= (1 << chdat->epnum);
+ else
+ reg |= (1 << (chdat->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg);
+
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+ if (chdat->ch >= 0) {
+ omap_stop_dma(chdat->ch);
+ omap_free_dma(chdat->ch);
+ chdat->ch = -1;
+ }
+
+ if (chdat->dmareq >= 0)
+ tusb_omap_dma_free_dmareq(chdat);
+
+ channel = NULL;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch) {
+ kfree(ch->private_data);
+ kfree(ch);
+ }
+ }
+
+ if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
+ omap_free_dma(tusb_dma->ch);
+
+ kfree(tusb_dma);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ /* REVISIT: Get dmareq lines used from board-*.c */
+
+ musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
+
+ musb_writel(tbase, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2)
+ | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
+ | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
+ if (!tusb_dma)
+ goto cleanup;
+
+ tusb_dma->musb = musb;
+ tusb_dma->tbase = musb->ctrl_base;
+
+ tusb_dma->ch = -1;
+ tusb_dma->dmareq = -1;
+ tusb_dma->sync_dev = -1;
+
+ tusb_dma->controller.start = tusb_omap_dma_start;
+ tusb_dma->controller.stop = tusb_omap_dma_stop;
+ tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
+ tusb_dma->controller.channel_release = tusb_omap_dma_release;
+ tusb_dma->controller.channel_program = tusb_omap_dma_program;
+ tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
+
+ if (tusb_get_revision(musb) >= TUSB_REV_30)
+ tusb_dma->multichannel = 1;
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch;
+ struct tusb_omap_dma_ch *chdat;
+
+ ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
+ if (!ch)
+ goto cleanup;
+
+ dma_channel_pool[i] = ch;
+
+ chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
+ if (!chdat)
+ goto cleanup;
+
+ ch->status = MUSB_DMA_STATUS_UNKNOWN;
+ ch->private_data = chdat;
+ }
+
+ return &tusb_dma->controller;
+
+cleanup:
+ dma_controller_destroy(&tusb_dma->controller);
+
+ return NULL;
+}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8878c1767fc8..70338f4ec918 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -499,9 +499,10 @@ config USB_SERIAL_SAFE_PADDED
config USB_SERIAL_SIERRAWIRELESS
tristate "USB Sierra Wireless Driver"
help
- Say M here if you want to use a Sierra Wireless device (if
- using an PC 5220 or AC580 please use the Airprime driver
- instead).
+ Say M here if you want to use Sierra Wireless devices.
+
+ Many devices have a feature known as TRU-Install. For those devices
+ to work properly, the USB Storage Sierra feature must be enabled.
+
To compile this driver as a module, choose M here: the
module will be called sierra.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 838717250145..984f6eff4c47 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -563,6 +563,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -637,6 +638,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
@@ -646,6 +648,10 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
{ }, /* Optional parameter entry */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index a577ea44dcf9..382265bba969 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -524,7 +524,9 @@
#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
+#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
+#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
/*
* Definitions for ID TECH (www.idt-net.com) devices
@@ -815,6 +817,11 @@
#define OLIMEX_VID 0x15BA
#define OLIMEX_ARM_USB_OCD_PID 0x0003
+/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
+/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
+#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
+#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
+
/* www.elsterelectricity.com Elster Unicom III Optical Probe */
#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e4eca95f2b0f..e143198aeb02 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -186,6 +186,23 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define BANDRICH_VENDOR_ID 0x1A8D
#define BANDRICH_PRODUCT_C100_1 0x1002
#define BANDRICH_PRODUCT_C100_2 0x1003
+#define BANDRICH_PRODUCT_1004 0x1004
+#define BANDRICH_PRODUCT_1005 0x1005
+#define BANDRICH_PRODUCT_1006 0x1006
+#define BANDRICH_PRODUCT_1007 0x1007
+#define BANDRICH_PRODUCT_1008 0x1008
+#define BANDRICH_PRODUCT_1009 0x1009
+#define BANDRICH_PRODUCT_100A 0x100a
+
+#define BANDRICH_PRODUCT_100B 0x100b
+#define BANDRICH_PRODUCT_100C 0x100c
+#define BANDRICH_PRODUCT_100D 0x100d
+#define BANDRICH_PRODUCT_100E 0x100e
+
+#define BANDRICH_PRODUCT_100F 0x100f
+#define BANDRICH_PRODUCT_1010 0x1010
+#define BANDRICH_PRODUCT_1011 0x1011
+#define BANDRICH_PRODUCT_1012 0x1012
#define AMOI_VENDOR_ID 0x1614
#define AMOI_PRODUCT_9508 0x0800
@@ -197,6 +214,10 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
+/* ZTE PRODUCTS */
+#define ZTE_VENDOR_ID 0x19d2
+#define ZTE_PRODUCT_MF628 0x0015
+
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -302,12 +323,28 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -346,11 +383,7 @@ static struct usb_serial_driver option_1port_device = {
.read_int_callback = option_instat_callback,
};
-#ifdef CONFIG_USB_DEBUG
static int debug;
-#else
-#define debug 0
-#endif
/* per port private data */
@@ -954,8 +987,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-#ifdef CONFIG_USB_DEBUG
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug messages");
-#endif
-
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 2c9c446ad625..1ede1441cb1b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -90,7 +90,6 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
{ USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
{ USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
- { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) },
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 6ac3bbcf7a22..a3bd039c78e9 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -107,10 +107,6 @@
#define COREGA_VENDOR_ID 0x07aa
#define COREGA_PRODUCT_ID 0x002a
-/* HL HL-340 (ID: 4348:5523) */
-#define HL340_VENDOR_ID 0x4348
-#define HL340_PRODUCT_ID 0x5523
-
/* Y.C. Cable U.S.A., Inc - USB to RS-232 */
#define YCCABLE_VENDOR_ID 0x05ad
#define YCCABLE_PRODUCT_ID 0x0fba
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 2f6f1523ec56..706033753adb 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -14,7 +14,7 @@
Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
*/
-#define DRIVER_VERSION "v.1.2.9c"
+#define DRIVER_VERSION "v.1.2.13a"
#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
@@ -31,6 +31,7 @@
#define SWIMS_USB_REQUEST_SetPower 0x00
#define SWIMS_USB_REQUEST_SetNmea 0x07
#define SWIMS_USB_REQUEST_SetMode 0x0B
+#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
#define SWIMS_SET_MODE_Modem 0x0001
/* per port private data */
@@ -40,18 +41,11 @@
static int debug;
static int nmea;
-static int truinstall = 1;
-
-enum devicetype {
- DEVICE_3_PORT = 0,
- DEVICE_1_PORT = 1,
- DEVICE_INSTALLER = 2,
-};
static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
{
int result;
- dev_dbg(&udev->dev, "%s", "SET POWER STATE\n");
+ dev_dbg(&udev->dev, "%s", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetPower, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
@@ -63,25 +57,10 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
return result;
}
-static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
-{
- int result;
- dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n");
- result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- SWIMS_USB_REQUEST_SetMode, /* __u8 request */
- USB_TYPE_VENDOR, /* __u8 request type */
- eSWocMode, /* __u16 value */
- 0x0000, /* __u16 index */
- NULL, /* void *data */
- 0, /* __u16 size */
- USB_CTRL_SET_TIMEOUT); /* int timeout */
- return result;
-}
-
static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
{
int result;
- dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n");
+ dev_dbg(&udev->dev, "%s", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetNmea, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
@@ -97,6 +76,7 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
{
int result;
int *num_ports = usb_get_serial_data(serial);
+ dev_dbg(&serial->dev->dev, "%s", __func__);
result = *num_ports;
@@ -110,22 +90,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
static int sierra_calc_interface(struct usb_serial *serial)
{
- int interface;
- struct usb_interface *p_interface;
- struct usb_host_interface *p_host_interface;
+ int interface;
+ struct usb_interface *p_interface;
+ struct usb_host_interface *p_host_interface;
+ dev_dbg(&serial->dev->dev, "%s", __func__);
- /* Get the interface structure pointer from the serial struct */
- p_interface = serial->interface;
+ /* Get the interface structure pointer from the serial struct */
+ p_interface = serial->interface;
- /* Get a pointer to the host interface structure */
- p_host_interface = p_interface->cur_altsetting;
+ /* Get a pointer to the host interface structure */
+ p_host_interface = p_interface->cur_altsetting;
- /* read the interface descriptor for this active altsetting
- * to find out the interface number we are on
- */
- interface = p_host_interface->desc.bInterfaceNumber;
+ /* read the interface descriptor for this active altsetting
+ * to find out the interface number we are on
+ */
+ interface = p_host_interface->desc.bInterfaceNumber;
- return interface;
+ return interface;
}
static int sierra_probe(struct usb_serial *serial,
@@ -135,43 +116,40 @@ static int sierra_probe(struct usb_serial *serial,
struct usb_device *udev;
int *num_ports;
u8 ifnum;
+ u8 numendpoints;
+
+ dev_dbg(&serial->dev->dev, "%s", __func__);
num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
if (!num_ports)
return -ENOMEM;
ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
udev = serial->dev;
- /* Figure out the interface number from the serial structure */
- ifnum = sierra_calc_interface(serial);
-
- /*
- * If this interface supports more than 1 alternate
- * select the 2nd one
- */
- if (serial->interface->num_altsetting == 2) {
- dev_dbg(&udev->dev,
- "Selecting alt setting for interface %d\n",
- ifnum);
+ /* Figure out the interface number from the serial structure */
+ ifnum = sierra_calc_interface(serial);
- /* We know the alternate setting is 1 for the MC8785 */
- usb_set_interface(udev, ifnum, 1);
- }
+ /*
+ * If this interface supports more than 1 alternate
+ * select the 2nd one
+ */
+ if (serial->interface->num_altsetting == 2) {
+ dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n",
+ ifnum);
+ /* We know the alternate setting is 1 for the MC8785 */
+ usb_set_interface(udev, ifnum, 1);
+ }
- /* Check if in installer mode */
- if (truinstall && id->driver_info == DEVICE_INSTALLER) {
- dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n");
- result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
- /* Don't bind to the device when in installer mode */
- kfree(num_ports);
- return -EIO;
- } else if (id->driver_info == DEVICE_1_PORT)
- *num_ports = 1;
- else if (ifnum == 0x99)
+ /* Dummy interface present on some SKUs should be ignored */
+ if (ifnum == 0x99)
*num_ports = 0;
+ else if (numendpoints <= 3)
+ *num_ports = 1;
else
- *num_ports = 3;
+ *num_ports = (numendpoints-1)/2;
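+ /* Each port is assumed to use one bulk IN/OUT pair, with the
+ * remaining endpoint being the shared interrupt endpoint. */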
+
/*
* save off our num_ports info so that we can use it in the
* calc_num_ports callback
@@ -187,40 +165,50 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
+ { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
{ USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
{ USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
{ USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
{ USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
- { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */
+ /* Sierra Wireless C597 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
+ { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
{ USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
{ USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
- { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */
+ { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */
{ USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */
{ USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */
{ USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
{ USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
- { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/
- { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/
- { USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless MC8785 Composite*/
+ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
+ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
+ { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
+ { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
+ { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
+ { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */
{ USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
{ USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
{ USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
{ USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */
{ USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */
{ USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
- { USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */
- { USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */
-
- { USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */
- { USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */
-
- { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */
- { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */
+ { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
+ { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
+ /* Sierra Wireless C885 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
+
+ { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
+ { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
- { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER},
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -268,13 +256,19 @@ static int sierra_send_setup(struct tty_struct *tty,
if (portdata->rts_state)
val |= 0x02;
- /* Determine which port is targeted */
- if (port->bulk_out_endpointAddress == 2)
- interface = 0;
- else if (port->bulk_out_endpointAddress == 4)
- interface = 1;
- else if (port->bulk_out_endpointAddress == 5)
- interface = 2;
+ /* If this is a composite device, report the interface properly */
+ if (serial->num_ports == 1)
+ interface = sierra_calc_interface(serial);
+
+ /* Otherwise we need to do non-composite mapping */
+ else {
+ if (port->bulk_out_endpointAddress == 2)
+ interface = 0;
+ else if (port->bulk_out_endpointAddress == 4)
+ interface = 1;
+ else if (port->bulk_out_endpointAddress == 5)
+ interface = 2;
+ }
return usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
@@ -713,7 +707,7 @@ static void sierra_shutdown(struct usb_serial *serial)
static struct usb_serial_driver sierra_device = {
.driver = {
.owner = THIS_MODULE,
- .name = "sierra1",
+ .name = "sierra",
},
.description = "Sierra USB modem",
.id_table = id_table,
@@ -769,14 +763,8 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-module_param(truinstall, bool, 0);
-MODULE_PARM_DESC(truinstall, "TRU-Install support");
-
-module_param(nmea, bool, 0);
+module_param(nmea, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nmea, "NMEA streaming");
-#ifdef CONFIG_USB_DEBUG
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug messages");
-#endif
-
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8c2d531eedea..b157c48e8b78 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -122,9 +122,6 @@ static void return_serial(struct usb_serial *serial)
dbg("%s", __func__);
- if (serial == NULL)
- return;
-
for (i = 0; i < serial->num_ports; ++i)
serial_table[serial->minor + i] = NULL;
}
@@ -142,7 +139,8 @@ static void destroy_serial(struct kref *kref)
serial->type->shutdown(serial);
/* return the minor range that this device had */
- return_serial(serial);
+ if (serial->minor != SERIAL_TTY_NO_MINOR)
+ return_serial(serial);
for (i = 0; i < serial->num_ports; ++i)
serial->port[i]->port.count = 0;
@@ -575,6 +573,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
serial->interface = interface;
kref_init(&serial->kref);
mutex_init(&serial->disc_mutex);
+ serial->minor = SERIAL_TTY_NO_MINOR;
return serial;
}
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 3d9249632ae1..c76034672c18 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -146,6 +146,18 @@ config USB_STORAGE_KARMA
on the resulting scsi device node returns the Karma to normal
operation.
+config USB_STORAGE_SIERRA
+ bool "Sierra Wireless TRU-Install Feature Support"
+ depends on USB_STORAGE
+ help
+ Say Y here to include additional code to support Sierra Wireless
+ products with the TRU-Install feature (e.g., AC597E, AC881U).
+
+ This code switches the Sierra Wireless device from being in
+ Mass Storage mode to Modem mode. It also has the ability to
+ support host software upgrades should full Linux support be added
+ to TRU-Install.
+
config USB_STORAGE_CYPRESS_ATACB
bool "SAT emulation on Cypress USB/ATA Bridge with ATACB"
depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index 4c596c766c53..bc3415b475c9 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -21,6 +21,7 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o
usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o
usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o
usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o
+usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o
usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
new file mode 100644
index 000000000000..4359a2cb42df
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.c
@@ -0,0 +1,207 @@
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/usb.h>
+
+#include "usb.h"
+#include "transport.h"
+#include "protocol.h"
+#include "scsiglue.h"
+#include "sierra_ms.h"
+#include "debug.h"
+
+#define SWIMS_USB_REQUEST_SetSwocMode 0x0B
+#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
+#define SWIMS_USB_INDEX_SetMode 0x0000
+#define SWIMS_SET_MODE_Modem 0x0001
+
+#define TRU_NORMAL 0x01
+#define TRU_FORCE_MS 0x02
+#define TRU_FORCE_MODEM 0x03
+
+static unsigned int swi_tru_install = 1;
+module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def),"
+ " 2=Force CD-Rom, 3=Force Modem)");
+
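+/* Reply buffer for the GetSwocInfo vendor request; the 16-bit fields
+ * arrive little-endian and are byte-swapped after the query. */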
+struct swoc_info {
+ __u8 rev;
+ __u8 reserved[8];
+ __u16 LinuxSKU;
+ __u16 LinuxVer;
+ __u8 reserved2[47];
+} __attribute__((__packed__));
+
+static bool containsFullLinuxPackage(struct swoc_info *swocInfo)
+{
+ if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) ||
+ (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF))
+ return true;
+ else
+ return false;
+}
+
+static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
+{
+ int result;
+ US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n");
+ result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */
+ USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */
+ eSWocMode, /* __u16 value */
+ 0x0000, /* __u16 index */
+ NULL, /* void *data */
+ 0, /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+ return result;
+}
+
+
+static int sierra_get_swoc_info(struct usb_device *udev,
+ struct swoc_info *swocInfo)
+{
+ int result;
+
+ US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n");
+
+ result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */
+ USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */
+ 0, /* __u16 value */
+ 0, /* __u16 index */
+ (void *) swocInfo, /* void *data */
+ sizeof(struct swoc_info), /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+
+ swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU);
+ swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer);
+ return result;
+}
+
+static void debug_swoc(struct swoc_info *swocInfo)
+{
+ US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev);
+ US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU);
+ US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer);
+}
+
+
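+/* sysfs attribute reporting either the forced mode or the device's
+ * TRU-Install (SWoC) revision, SKU and version. */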
+static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct swoc_info *swocInfo;
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int result;
+ if (swi_tru_install == TRU_FORCE_MS) {
+ result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
+ } else {
+ swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
+ if (!swocInfo) {
+ US_DEBUGP("SWIMS: Allocation failure\n");
+ snprintf(buf, PAGE_SIZE, "Error\n");
+ return -ENOMEM;
+ }
+ result = sierra_get_swoc_info(udev, swocInfo);
+ if (result < 0) {
+ US_DEBUGP("SWIMS: failed SWoC query\n");
+ kfree(swocInfo);
+ snprintf(buf, PAGE_SIZE, "Error\n");
+ return -EIO;
+ }
+ debug_swoc(swocInfo);
+ result = snprintf(buf, PAGE_SIZE,
+ "REV=%02d SKU=%04X VER=%04X\n",
+ swocInfo->rev,
+ swocInfo->LinuxSKU,
+ swocInfo->LinuxVer);
+ kfree(swocInfo);
+ }
+ return result;
+}
+static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
+
+int sierra_ms_init(struct us_data *us)
+{
+ int result, retries;
+ signed long delay_t;
+ struct swoc_info *swocInfo;
+ struct usb_device *udev;
+ struct Scsi_Host *sh;
+ struct scsi_device *sd;
+
+ delay_t = 2;
+ retries = 3;
+ result = 0;
+ udev = us->pusb_dev;
+
+ sh = us_to_host(us);
+ sd = scsi_get_host_dev(sh);
+
+ US_DEBUGP("SWIMS: sierra_ms_init called\n");
+
+ /* Force Modem mode */
+ if (swi_tru_install == TRU_FORCE_MODEM) {
+ US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n");
+ result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
+ if (result < 0)
+ US_DEBUGP("SWIMS: Failed to switch to modem mode.\n");
+ return -EIO;
+ }
+ /* Force Mass Storage mode (keep CD-Rom) */
+ else if (swi_tru_install == TRU_FORCE_MS) {
+ US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n");
+ goto complete;
+ }
+ /* Normal TRU-Install Logic */
+ else {
+ US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n");
+
+ swocInfo = kmalloc(sizeof(struct swoc_info),
+ GFP_KERNEL);
+ if (!swocInfo) {
+ US_DEBUGP("SWIMS: %s", "Allocation failure\n");
+ return -ENOMEM;
+ }
+
+ retries = 3;
+ do {
+ retries--;
+ result = sierra_get_swoc_info(udev, swocInfo);
+ if (result < 0) {
+ US_DEBUGP("SWIMS: %s", "Failed SWoC query\n");
+ schedule_timeout_uninterruptible(2*HZ);
+ }
+ } while (retries && result < 0);
+
+ if (result < 0) {
+ US_DEBUGP("SWIMS: %s",
+ "Completely failed SWoC query\n");
+ kfree(swocInfo);
+ return -EIO;
+ }
+
+ debug_swoc(swocInfo);
+
+ /* If there is no Linux software on the TRU-Install device,
+ * then switch to modem mode.
+ */
+ if (!containsFullLinuxPackage(swocInfo)) {
+ US_DEBUGP("SWIMS: %s",
+ "Switching to Modem Mode\n");
+ result = sierra_set_ms_mode(udev,
+ SWIMS_SET_MODE_Modem);
+ if (result < 0)
+ US_DEBUGP("SWIMS: Failed to switch modem\n");
+ kfree(swocInfo);
+ return -EIO;
+ }
+ kfree(swocInfo);
+ }
+complete:
+ result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h
new file mode 100644
index 000000000000..bb48634ac1fc
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.h
@@ -0,0 +1,4 @@
+#ifndef _SIERRA_MS_H_
+#define _SIERRA_MS_H_
+extern int sierra_ms_init(struct us_data *us);
+#endif
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcbbfdb7b2b0..3523a0bfa0ff 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1032,8 +1032,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
/* try to compute the actual residue, based on how much data
* was really transferred and what the device tells us */
- if (residue) {
- if (!(us->fflags & US_FL_IGNORE_RESIDUE)) {
+ if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
+
+ /* Heuristically detect devices that generate bogus residues
+ * by seeing what happens with INQUIRY and READ CAPACITY
+ * commands.
+ */
+ if (bcs->Status == US_BULK_STAT_OK &&
+ scsi_get_resid(srb) == 0 &&
+ ((srb->cmnd[0] == INQUIRY &&
+ transfer_length == 36) ||
+ (srb->cmnd[0] == READ_CAPACITY &&
+ transfer_length == 8))) {
+ us->fflags |= US_FL_IGNORE_RESIDUE;
+
+ } else {
residue = min(residue, transfer_length);
scsi_set_resid(srb, max(scsi_get_resid(srb),
(int) residue));
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7ae69f55aa96..ba412e68d474 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -225,6 +225,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Reported by Cedric Godin <cedric@belbone.be> */
+UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
+ "Nokia",
+ "5300",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
"SMSC",
@@ -356,14 +363,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200,
US_FL_FIX_CAPACITY),
/* Reported by Emil Larsson <emil@swip.net> */
-UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110,
+UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111,
"NIKON",
"NIKON DSC D80",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY),
/* Reported by Ortwin Glueck <odi@odi.ch> */
-UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110,
+UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111,
"NIKON",
"NIKON DSC D40",
US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1185,6 +1192,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
+/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
+UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
+ "Simple Tech/Datafab",
+ "CF+SM Reader",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
* to the USB storage specification in two ways:
* - They tell us they are using transport protocol CBI. In reality they
@@ -1562,6 +1576,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
US_SC_DEVICE, US_PR_DEVICE, NULL,
0),
+#ifdef CONFIG_USB_STORAGE_SIERRA
/* Reported by Kevin Lloyd <linux@sierrawireless.com>
* Entry is needed for the initializer function override,
* which instructs the device to load as a modem
@@ -1570,8 +1585,9 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
"Sierra Wireless",
"USB MMC Storage",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_IGNORE_DEVICE),
+ US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init,
+ 0),
+#endif
/* Reported by Jaco Kroon <jaco@kroon.co.za>
* The usb-storage module found on the Digitech GNX4 (and supposedly other
@@ -1743,6 +1759,15 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002,
US_FL_FIX_CAPACITY),
/*
+ * Patch by Jost Diederichs <jost@qdusa.com>
+ */
+UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999,
+ "Motorola Inc.",
+ "Motorola Phone (RAZRV3xx)",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY),
+
+/*
* Patch by Constantin Baranov <const@tltsu.ru>
* Report by Andreas Koenecke.
* Motorola ROKR Z6.
@@ -1767,6 +1792,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
+UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
+ "iRiver",
+ "MP3 T10",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/*
* David Härdeman <david@2gen.com>
* The key makes the SCSI stack print confusing (but harmless) messages
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bfea851be985..73679aa506de 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -102,6 +102,9 @@
#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
#include "cypress_atacb.h"
#endif
+#ifdef CONFIG_USB_STORAGE_SIERRA
+#include "sierra_ms.h"
+#endif
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index e7018a2f56af..9c5925927ece 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -39,7 +39,9 @@
#endif
#if defined(CONFIG_ARCH_AT91)
-#define ATMEL_LCDFB_FBINFO_DEFAULT FBINFO_DEFAULT
+#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
+ | FBINFO_PARTIAL_PAN_OK \
+ | FBINFO_HWACCEL_YPAN)
static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
struct fb_var_screeninfo *var)
@@ -177,7 +179,7 @@ static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.xpanstep = 0,
- .ypanstep = 0,
+ .ypanstep = 1,
.ywrapstep = 0,
.accel = FB_ACCEL_NONE,
};
@@ -240,9 +242,11 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
{
struct fb_info *info = sinfo->info;
struct fb_var_screeninfo *var = &info->var;
+ unsigned int smem_len;
- info->fix.smem_len = (var->xres_virtual * var->yres_virtual
- * ((var->bits_per_pixel + 7) / 8));
+ smem_len = (var->xres_virtual * var->yres_virtual
+ * ((var->bits_per_pixel + 7) / 8));
+ info->fix.smem_len = max(smem_len, sinfo->smem_len);
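+ /* Honour a larger platform-supplied smem_len, e.g. to leave room
+ * for y-panning. */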
info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
(dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
@@ -794,6 +798,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
sinfo->default_monspecs = pdata_sinfo->default_monspecs;
sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
sinfo->guard_time = pdata_sinfo->guard_time;
+ sinfo->smem_len = pdata_sinfo->smem_len;
sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
} else {
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index 4d13f68436e6..aa95f8350242 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -55,6 +55,10 @@ static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo,
OUTREG(DP_WRITE_MSK, 0xffffffff);
OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
+ radeon_fifo_wait(2);
+ OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
+ OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
+
radeon_fifo_wait(2);
OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
@@ -116,6 +120,10 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
| (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
+ radeon_fifo_wait(2);
+ OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
+ OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
+
radeon_fifo_wait(3);
OUTREG(SRC_Y_X, (sy << 16) | sx);
OUTREG(DST_Y_X, (dy << 16) | dx);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 33859934a8e4..c6299e8a041d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2518,7 +2518,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
c = vc->vc_video_erase_char;
vc->vc_video_erase_char =
((c & 0xfe00) >> 1) | (c & 0xff);
- c = vc->vc_def_color;
+ c = vc->vc_scrl_erase_char;
vc->vc_scrl_erase_char =
((c & 0xFE00) >> 1) | (c & 0xFF);
vc->vc_attr >>= 1;
@@ -2551,7 +2551,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
if (vc->vc_can_do_color) {
vc->vc_video_erase_char =
((c & 0xff00) << 1) | (c & 0xff);
- c = vc->vc_def_color;
+ c = vc->vc_scrl_erase_char;
vc->vc_scrl_erase_char =
((c & 0xFF00) << 1) | (c & 0xFF);
vc->vc_attr <<= 1;
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index bd320a2bfb7c..fb51197d1c98 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -479,6 +479,10 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
base_plane_width = machine_data->fsl_diu_info[0]->var.xres;
base_plane_height = machine_data->fsl_diu_info[0]->var.yres;
+ if (mfbi->x_aoi_d < 0)
+ mfbi->x_aoi_d = 0;
+ if (mfbi->y_aoi_d < 0)
+ mfbi->y_aoi_d = 0;
switch (index) {
case 0:
if (mfbi->x_aoi_d != 0)
@@ -778,6 +782,22 @@ static void unmap_video_memory(struct fb_info *info)
}
/*
+ * Using the fb_var_screeninfo in fb_info we set the aoi of this
+ * particular framebuffer. It is a light version of fsl_diu_set_par.
+ */
+static int fsl_diu_set_aoi(struct fb_info *info)
+{
+ struct fb_var_screeninfo *var = &info->var;
+ struct mfb_info *mfbi = info->par;
+ struct diu_ad *ad = mfbi->ad;
+
+ /* AOI should not be greater than display size */
+ ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset);
+ ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
+ return 0;
+}
+
+/*
* Using the fb_var_screeninfo in fb_info we set the resolution of this
* particular framebuffer. This function alters the fb_fix_screeninfo stored
* in fb_info. It does not alter var in fb_info since we are using that
@@ -817,11 +837,11 @@ static int fsl_diu_set_par(struct fb_info *info)
diu_ops.get_pixel_format(var->bits_per_pixel,
machine_data->monitor_port);
ad->addr = cpu_to_le32(info->fix.smem_start);
- ad->src_size_g_alpha = cpu_to_le32((var->yres << 12) |
- var->xres) | mfbi->g_alpha;
- /* fix me. AOI should not be greater than display size */
+ ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) |
+ var->xres_virtual) | mfbi->g_alpha;
+ /* AOI should not be greater than display size */
ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres);
- ad->offset_xyi = 0;
+ ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset);
ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
/* Disable chroma keying function */
@@ -921,6 +941,8 @@ static int fsl_diu_pan_display(struct fb_var_screeninfo *var,
else
info->var.vmode &= ~FB_VMODE_YWRAP;
+ fsl_diu_set_aoi(info);
+
return 0;
}
@@ -989,7 +1011,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
pr_debug("set AOI display offset of index %d to (%d,%d)\n",
mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
fsl_diu_check_var(&info->var, info);
- fsl_diu_set_par(info);
+ fsl_diu_set_aoi(info);
break;
case MFB_GET_AOID:
aoi_d.x_aoi_d = mfbi->x_aoi_d;
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c
index 75ee5a12e549..c14e3e2212b3 100644
--- a/drivers/video/matrox/i2c-matroxfb.c
+++ b/drivers/video/matrox/i2c-matroxfb.c
@@ -87,13 +87,7 @@ static int matroxfb_gpio_getscl(void* data) {
return (matroxfb_read_gpio(b->minfo) & b->mask.clock) ? 1 : 0;
}
-static struct i2c_adapter matrox_i2c_adapter_template =
-{
- .owner = THIS_MODULE,
- .id = I2C_HW_B_G400,
-};
-
-static struct i2c_algo_bit_data matrox_i2c_algo_template =
+static const struct i2c_algo_bit_data matrox_i2c_algo_template =
{
.setsda = matroxfb_gpio_setsda,
.setscl = matroxfb_gpio_setscl,
@@ -112,7 +106,7 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo,
b->minfo = minfo;
b->mask.data = data;
b->mask.clock = clock;
- b->adapter = matrox_i2c_adapter_template;
+ b->adapter.owner = THIS_MODULE;
snprintf(b->adapter.name, sizeof(b->adapter.name), name,
minfo->fbcon.node);
i2c_set_adapdata(&b->adapter, b);
@@ -187,6 +181,17 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0);
if (err)
printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. Continuing anyway.\n");
+ else {
+ struct i2c_board_info maven_info = {
+ I2C_BOARD_INFO("maven", 0x1b),
+ };
+ unsigned short const addr_list[2] = {
+ 0x1b, I2C_CLIENT_END
+ };
+
+ i2c_new_probed_device(&m2info->maven.adapter,
+ &maven_info, addr_list);
+ }
}
return m2info;
fail_ddc1:;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 89da27bd5c49..042408a8c631 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -19,8 +19,6 @@
#include <linux/matroxfb.h>
#include <asm/div64.h>
-#define MAVEN_I2CID (0x1B)
-
#define MGATVO_B 1
#define MGATVO_C 2
@@ -128,7 +126,7 @@ static int get_ctrl_id(__u32 v4l2_id) {
struct maven_data {
struct matrox_fb_info* primary_head;
- struct i2c_client client;
+ struct i2c_client *client;
int version;
};
@@ -974,7 +972,7 @@ static inline int maven_compute_timming(struct maven_data* md,
static int maven_program_timming(struct maven_data* md,
const struct mavenregs* m) {
- struct i2c_client* c = &md->client;
+ struct i2c_client *c = md->client;
if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) {
LR(0x80);
@@ -1011,7 +1009,7 @@ static int maven_program_timming(struct maven_data* md,
}
static inline int maven_resync(struct maven_data* md) {
- struct i2c_client* c = &md->client;
+ struct i2c_client *c = md->client;
maven_set_reg(c, 0x95, 0x20); /* start whole thing */
return 0;
}
@@ -1069,48 +1067,48 @@ static int maven_set_control (struct maven_data* md,
maven_compute_bwlevel(md, &blacklevel, &whitelevel);
blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8);
whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8);
- maven_set_reg_pair(&md->client, 0x0e, blacklevel);
- maven_set_reg_pair(&md->client, 0x1e, whitelevel);
+ maven_set_reg_pair(md->client, 0x0e, blacklevel);
+ maven_set_reg_pair(md->client, 0x1e, whitelevel);
}
break;
case V4L2_CID_SATURATION:
{
- maven_set_reg(&md->client, 0x20, p->value);
- maven_set_reg(&md->client, 0x22, p->value);
+ maven_set_reg(md->client, 0x20, p->value);
+ maven_set_reg(md->client, 0x22, p->value);
}
break;
case V4L2_CID_HUE:
{
- maven_set_reg(&md->client, 0x25, p->value);
+ maven_set_reg(md->client, 0x25, p->value);
}
break;
case V4L2_CID_GAMMA:
{
const struct maven_gamma* g;
g = maven_compute_gamma(md);
- maven_set_reg(&md->client, 0x83, g->reg83);
- maven_set_reg(&md->client, 0x84, g->reg84);
- maven_set_reg(&md->client, 0x85, g->reg85);
- maven_set_reg(&md->client, 0x86, g->reg86);
- maven_set_reg(&md->client, 0x87, g->reg87);
- maven_set_reg(&md->client, 0x88, g->reg88);
- maven_set_reg(&md->client, 0x89, g->reg89);
- maven_set_reg(&md->client, 0x8a, g->reg8a);
- maven_set_reg(&md->client, 0x8b, g->reg8b);
+ maven_set_reg(md->client, 0x83, g->reg83);
+ maven_set_reg(md->client, 0x84, g->reg84);
+ maven_set_reg(md->client, 0x85, g->reg85);
+ maven_set_reg(md->client, 0x86, g->reg86);
+ maven_set_reg(md->client, 0x87, g->reg87);
+ maven_set_reg(md->client, 0x88, g->reg88);
+ maven_set_reg(md->client, 0x89, g->reg89);
+ maven_set_reg(md->client, 0x8a, g->reg8a);
+ maven_set_reg(md->client, 0x8b, g->reg8b);
}
break;
case MATROXFB_CID_TESTOUT:
{
unsigned char val
- = maven_get_reg(&md->client,0x8d);
+ = maven_get_reg(md->client, 0x8d);
if (p->value) val |= 0x10;
else val &= ~0x10;
- maven_set_reg(&md->client, 0x8d, val);
+ maven_set_reg(md->client, 0x8d, val);
}
break;
case MATROXFB_CID_DEFLICKER:
{
- maven_set_reg(&md->client, 0x93, maven_compute_deflicker(md));
+ maven_set_reg(md->client, 0x93, maven_compute_deflicker(md));
}
break;
}
@@ -1189,6 +1187,7 @@ static int maven_init_client(struct i2c_client* clnt) {
MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo);
md->primary_head = MINFO;
+ md->client = clnt;
down_write(&ACCESS_FBINFO(altout.lock));
ACCESS_FBINFO(outputs[1]).output = &maven_altout;
ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src;
@@ -1232,14 +1231,11 @@ static int maven_shutdown_client(struct i2c_client* clnt) {
return 0;
}
-static const unsigned short normal_i2c[] = { MAVEN_I2CID, I2C_CLIENT_END };
-I2C_CLIENT_INSMOD;
-
-static struct i2c_driver maven_driver;
-
-static int maven_detect_client(struct i2c_adapter* adapter, int address, int kind) {
- int err = 0;
- struct i2c_client* new_client;
+static int maven_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int err = -ENODEV;
struct maven_data* data;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
@@ -1250,50 +1246,37 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin
err = -ENOMEM;
goto ERROR0;
}
- new_client = &data->client;
- i2c_set_clientdata(new_client, data);
- new_client->addr = address;
- new_client->adapter = adapter;
- new_client->driver = &maven_driver;
- new_client->flags = 0;
- strlcpy(new_client->name, "maven", I2C_NAME_SIZE);
- if ((err = i2c_attach_client(new_client)))
- goto ERROR3;
- err = maven_init_client(new_client);
+ i2c_set_clientdata(client, data);
+ err = maven_init_client(client);
if (err)
goto ERROR4;
return 0;
ERROR4:;
- i2c_detach_client(new_client);
-ERROR3:;
- kfree(new_client);
+ kfree(data);
ERROR0:;
return err;
}
-static int maven_attach_adapter(struct i2c_adapter* adapter) {
- if (adapter->id == I2C_HW_B_G400)
- return i2c_probe(adapter, &addr_data, &maven_detect_client);
- return 0;
-}
-
-static int maven_detach_client(struct i2c_client* client) {
- int err;
-
- if ((err = i2c_detach_client(client)))
- return err;
+static int maven_remove(struct i2c_client *client)
+{
maven_shutdown_client(client);
kfree(i2c_get_clientdata(client));
return 0;
}
+static const struct i2c_device_id maven_id[] = {
+ { "maven", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, maven_id);
+
static struct i2c_driver maven_driver={
.driver = {
.name = "maven",
},
- .id = I2C_DRIVERID_MGATVO,
- .attach_adapter = maven_attach_adapter,
- .detach_client = maven_detach_client,
+ .probe = maven_probe,
+ .remove = maven_remove,
+ .id_table = maven_id,
};
static int __init matroxfb_maven_init(void)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index e7aa7ae8fca8..97204497d9f7 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1031,7 +1031,9 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
pxa_gpio_mode(GPIO74_LCD_FCLK_MD);
pxa_gpio_mode(GPIO75_LCD_LCLK_MD);
pxa_gpio_mode(GPIO76_LCD_PCLK_MD);
- pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD);
+
+ if ((lccr0 & LCCR0_PAS) == 0)
+ pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD);
}
static void pxafb_enable_controller(struct pxafb_info *fbi)
@@ -1400,6 +1402,8 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi,
if (lcd_conn == LCD_MONO_STN_8BPP)
fbi->lccr0 |= LCCR0_DPD;
+ fbi->lccr0 |= (lcd_conn & LCD_ALTERNATE_MAPPING) ? LCCR0_LDDALT : 0;
+
fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff);
fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0;
fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0;
@@ -1673,53 +1677,63 @@ MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
#define pxafb_setup_options() (0)
#endif
-static int __devinit pxafb_probe(struct platform_device *dev)
-{
- struct pxafb_info *fbi;
- struct pxafb_mach_info *inf;
- struct resource *r;
- int irq, ret;
-
- dev_dbg(&dev->dev, "pxafb_probe\n");
-
- inf = dev->dev.platform_data;
- ret = -ENOMEM;
- fbi = NULL;
- if (!inf)
- goto failed;
-
- ret = pxafb_parse_options(&dev->dev, g_options);
- if (ret < 0)
- goto failed;
-
#ifdef DEBUG_VAR
- /* Check for various illegal bit-combinations. Currently only
- * a warning is given. */
+/* Check for various illegal bit-combinations. Currently only
+ * a warning is given. */
+static void __devinit pxafb_check_options(struct device *dev,
+ struct pxafb_mach_info *inf)
+{
+ if (inf->lcd_conn)
+ return;
if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK)
- dev_warn(&dev->dev, "machine LCCR0 setting contains "
+ dev_warn(dev, "machine LCCR0 setting contains "
"illegal bits: %08x\n",
inf->lccr0 & LCCR0_INVALID_CONFIG_MASK);
if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK)
- dev_warn(&dev->dev, "machine LCCR3 setting contains "
+ dev_warn(dev, "machine LCCR3 setting contains "
"illegal bits: %08x\n",
inf->lccr3 & LCCR3_INVALID_CONFIG_MASK);
if (inf->lccr0 & LCCR0_DPD &&
((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas ||
(inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl ||
(inf->lccr0 & LCCR0_CMS) != LCCR0_Mono))
- dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is "
+ dev_warn(dev, "Double Pixel Data (DPD) mode is "
"only valid in passive mono"
" single panel mode\n");
if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act &&
(inf->lccr0 & LCCR0_SDS) == LCCR0_Dual)
- dev_warn(&dev->dev, "Dual panel only valid in passive mode\n");
+ dev_warn(dev, "Dual panel only valid in passive mode\n");
if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas &&
(inf->modes->upper_margin || inf->modes->lower_margin))
- dev_warn(&dev->dev, "Upper and lower margins must be 0 in "
+ dev_warn(dev, "Upper and lower margins must be 0 in "
"passive mode\n");
+}
+#else
+#define pxafb_check_options(...) do {} while (0)
#endif
+static int __devinit pxafb_probe(struct platform_device *dev)
+{
+ struct pxafb_info *fbi;
+ struct pxafb_mach_info *inf;
+ struct resource *r;
+ int irq, ret;
+
+ dev_dbg(&dev->dev, "pxafb_probe\n");
+
+ inf = dev->dev.platform_data;
+ ret = -ENOMEM;
+ fbi = NULL;
+ if (!inf)
+ goto failed;
+
+ ret = pxafb_parse_options(&dev->dev, g_options);
+ if (ret < 0)
+ goto failed;
+
+ pxafb_check_options(&dev->dev, inf);
+
dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n",
inf->modes->xres,
inf->modes->yres,
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 32b9fe153641..db20542796bf 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -285,10 +285,11 @@ config ALIM1535_WDT
config ALIM7101_WDT
tristate "ALi M7101 PMU Computer Watchdog"
- depends on X86 && PCI
+ depends on PCI
help
This is the driver for the hardware watchdog on the ALi M7101 PMU
- as used in the x86 Cobalt servers.
+ as used in the x86 Cobalt servers and also found in some
+ SPARC Netra servers.
To compile this driver as a module, choose M here: the
module will be called alim7101_wdt.
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 3da2b90d2fe6..22715e3be5e7 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -157,8 +157,6 @@ static void s3c2410wdt_start(void)
writel(wdt_count, wdt_base + S3C2410_WTCNT);
writel(wtcon, wdt_base + S3C2410_WTCON);
spin_unlock(&wdt_lock);
-
- return 0;
}
static int s3c2410wdt_set_heartbeat(int timeout)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index e8da4ee761b5..25ecbd5b0404 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -175,6 +175,8 @@ out_no_root:
if (inode)
iput(inode);
+ cifs_umount(sb, cifs_sb);
+
out_mount_failed:
if (cifs_sb) {
#ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 28a22092d450..848286861c31 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -649,6 +649,7 @@ struct inode *cifs_iget(struct super_block *sb, unsigned long ino)
inode->i_fop = &simple_dir_operations;
inode->i_uid = cifs_sb->mnt_uid;
inode->i_gid = cifs_sb->mnt_gid;
+ } else if (rc) {
_FreeXid(xid);
iget_failed(inode);
return ERR_PTR(rc);
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index c4e7d721bd8d..89d2fb7b991a 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -30,16 +30,16 @@
static struct config_group *space_list;
static struct config_group *comm_list;
-static struct comm *local_comm;
+static struct dlm_comm *local_comm;
-struct clusters;
-struct cluster;
-struct spaces;
-struct space;
-struct comms;
-struct comm;
-struct nodes;
-struct node;
+struct dlm_clusters;
+struct dlm_cluster;
+struct dlm_spaces;
+struct dlm_space;
+struct dlm_comms;
+struct dlm_comm;
+struct dlm_nodes;
+struct dlm_node;
static struct config_group *make_cluster(struct config_group *, const char *);
static void drop_cluster(struct config_group *, struct config_item *);
@@ -68,17 +68,22 @@ static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
const char *buf, size_t len);
-static ssize_t comm_nodeid_read(struct comm *cm, char *buf);
-static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len);
-static ssize_t comm_local_read(struct comm *cm, char *buf);
-static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len);
-static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len);
-static ssize_t node_nodeid_read(struct node *nd, char *buf);
-static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len);
-static ssize_t node_weight_read(struct node *nd, char *buf);
-static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len);
-
-struct cluster {
+static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf);
+static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
+ size_t len);
+static ssize_t comm_local_read(struct dlm_comm *cm, char *buf);
+static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
+ size_t len);
+static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf,
+ size_t len);
+static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf);
+static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
+ size_t len);
+static ssize_t node_weight_read(struct dlm_node *nd, char *buf);
+static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
+ size_t len);
+
+struct dlm_cluster {
struct config_group group;
unsigned int cl_tcp_port;
unsigned int cl_buffer_size;
@@ -109,11 +114,11 @@ enum {
struct cluster_attribute {
struct configfs_attribute attr;
- ssize_t (*show)(struct cluster *, char *);
- ssize_t (*store)(struct cluster *, const char *, size_t);
+ ssize_t (*show)(struct dlm_cluster *, char *);
+ ssize_t (*store)(struct dlm_cluster *, const char *, size_t);
};
-static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
+static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
int *info_field, int check_zero,
const char *buf, size_t len)
{
@@ -134,12 +139,12 @@ static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
}
#define CLUSTER_ATTR(name, check_zero) \
-static ssize_t name##_write(struct cluster *cl, const char *buf, size_t len) \
+static ssize_t name##_write(struct dlm_cluster *cl, const char *buf, size_t len) \
{ \
return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \
check_zero, buf, len); \
} \
-static ssize_t name##_read(struct cluster *cl, char *buf) \
+static ssize_t name##_read(struct dlm_cluster *cl, char *buf) \
{ \
return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \
} \
@@ -181,8 +186,8 @@ enum {
struct comm_attribute {
struct configfs_attribute attr;
- ssize_t (*show)(struct comm *, char *);
- ssize_t (*store)(struct comm *, const char *, size_t);
+ ssize_t (*show)(struct dlm_comm *, char *);
+ ssize_t (*store)(struct dlm_comm *, const char *, size_t);
};
static struct comm_attribute comm_attr_nodeid = {
@@ -222,8 +227,8 @@ enum {
struct node_attribute {
struct configfs_attribute attr;
- ssize_t (*show)(struct node *, char *);
- ssize_t (*store)(struct node *, const char *, size_t);
+ ssize_t (*show)(struct dlm_node *, char *);
+ ssize_t (*store)(struct dlm_node *, const char *, size_t);
};
static struct node_attribute node_attr_nodeid = {
@@ -248,26 +253,26 @@ static struct configfs_attribute *node_attrs[] = {
NULL,
};
-struct clusters {
+struct dlm_clusters {
struct configfs_subsystem subsys;
};
-struct spaces {
+struct dlm_spaces {
struct config_group ss_group;
};
-struct space {
+struct dlm_space {
struct config_group group;
struct list_head members;
struct mutex members_lock;
int members_count;
};
-struct comms {
+struct dlm_comms {
struct config_group cs_group;
};
-struct comm {
+struct dlm_comm {
struct config_item item;
int nodeid;
int local;
@@ -275,11 +280,11 @@ struct comm {
struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};
-struct nodes {
+struct dlm_nodes {
struct config_group ns_group;
};
-struct node {
+struct dlm_node {
struct config_item item;
struct list_head list; /* space->members */
int nodeid;
@@ -372,38 +377,40 @@ static struct config_item_type node_type = {
.ct_owner = THIS_MODULE,
};
-static struct cluster *to_cluster(struct config_item *i)
+static struct dlm_cluster *to_cluster(struct config_item *i)
{
- return i ? container_of(to_config_group(i), struct cluster, group):NULL;
+ return i ? container_of(to_config_group(i), struct dlm_cluster, group) :
+ NULL;
}
-static struct space *to_space(struct config_item *i)
+static struct dlm_space *to_space(struct config_item *i)
{
- return i ? container_of(to_config_group(i), struct space, group) : NULL;
+ return i ? container_of(to_config_group(i), struct dlm_space, group) :
+ NULL;
}
-static struct comm *to_comm(struct config_item *i)
+static struct dlm_comm *to_comm(struct config_item *i)
{
- return i ? container_of(i, struct comm, item) : NULL;
+ return i ? container_of(i, struct dlm_comm, item) : NULL;
}
-static struct node *to_node(struct config_item *i)
+static struct dlm_node *to_node(struct config_item *i)
{
- return i ? container_of(i, struct node, item) : NULL;
+ return i ? container_of(i, struct dlm_node, item) : NULL;
}
static struct config_group *make_cluster(struct config_group *g,
const char *name)
{
- struct cluster *cl = NULL;
- struct spaces *sps = NULL;
- struct comms *cms = NULL;
+ struct dlm_cluster *cl = NULL;
+ struct dlm_spaces *sps = NULL;
+ struct dlm_comms *cms = NULL;
void *gps = NULL;
- cl = kzalloc(sizeof(struct cluster), GFP_KERNEL);
+ cl = kzalloc(sizeof(struct dlm_cluster), GFP_KERNEL);
gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
- sps = kzalloc(sizeof(struct spaces), GFP_KERNEL);
- cms = kzalloc(sizeof(struct comms), GFP_KERNEL);
+ sps = kzalloc(sizeof(struct dlm_spaces), GFP_KERNEL);
+ cms = kzalloc(sizeof(struct dlm_comms), GFP_KERNEL);
if (!cl || !gps || !sps || !cms)
goto fail;
@@ -443,7 +450,7 @@ static struct config_group *make_cluster(struct config_group *g,
static void drop_cluster(struct config_group *g, struct config_item *i)
{
- struct cluster *cl = to_cluster(i);
+ struct dlm_cluster *cl = to_cluster(i);
struct config_item *tmp;
int j;
@@ -461,20 +468,20 @@ static void drop_cluster(struct config_group *g, struct config_item *i)
static void release_cluster(struct config_item *i)
{
- struct cluster *cl = to_cluster(i);
+ struct dlm_cluster *cl = to_cluster(i);
kfree(cl->group.default_groups);
kfree(cl);
}
static struct config_group *make_space(struct config_group *g, const char *name)
{
- struct space *sp = NULL;
- struct nodes *nds = NULL;
+ struct dlm_space *sp = NULL;
+ struct dlm_nodes *nds = NULL;
void *gps = NULL;
- sp = kzalloc(sizeof(struct space), GFP_KERNEL);
+ sp = kzalloc(sizeof(struct dlm_space), GFP_KERNEL);
gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
- nds = kzalloc(sizeof(struct nodes), GFP_KERNEL);
+ nds = kzalloc(sizeof(struct dlm_nodes), GFP_KERNEL);
if (!sp || !gps || !nds)
goto fail;
@@ -500,7 +507,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
static void drop_space(struct config_group *g, struct config_item *i)
{
- struct space *sp = to_space(i);
+ struct dlm_space *sp = to_space(i);
struct config_item *tmp;
int j;
@@ -517,16 +524,16 @@ static void drop_space(struct config_group *g, struct config_item *i)
static void release_space(struct config_item *i)
{
- struct space *sp = to_space(i);
+ struct dlm_space *sp = to_space(i);
kfree(sp->group.default_groups);
kfree(sp);
}
static struct config_item *make_comm(struct config_group *g, const char *name)
{
- struct comm *cm;
+ struct dlm_comm *cm;
- cm = kzalloc(sizeof(struct comm), GFP_KERNEL);
+ cm = kzalloc(sizeof(struct dlm_comm), GFP_KERNEL);
if (!cm)
return ERR_PTR(-ENOMEM);
@@ -539,7 +546,7 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
static void drop_comm(struct config_group *g, struct config_item *i)
{
- struct comm *cm = to_comm(i);
+ struct dlm_comm *cm = to_comm(i);
if (local_comm == cm)
local_comm = NULL;
dlm_lowcomms_close(cm->nodeid);
@@ -550,16 +557,16 @@ static void drop_comm(struct config_group *g, struct config_item *i)
static void release_comm(struct config_item *i)
{
- struct comm *cm = to_comm(i);
+ struct dlm_comm *cm = to_comm(i);
kfree(cm);
}
static struct config_item *make_node(struct config_group *g, const char *name)
{
- struct space *sp = to_space(g->cg_item.ci_parent);
- struct node *nd;
+ struct dlm_space *sp = to_space(g->cg_item.ci_parent);
+ struct dlm_node *nd;
- nd = kzalloc(sizeof(struct node), GFP_KERNEL);
+ nd = kzalloc(sizeof(struct dlm_node), GFP_KERNEL);
if (!nd)
return ERR_PTR(-ENOMEM);
@@ -578,8 +585,8 @@ static struct config_item *make_node(struct config_group *g, const char *name)
static void drop_node(struct config_group *g, struct config_item *i)
{
- struct space *sp = to_space(g->cg_item.ci_parent);
- struct node *nd = to_node(i);
+ struct dlm_space *sp = to_space(g->cg_item.ci_parent);
+ struct dlm_node *nd = to_node(i);
mutex_lock(&sp->members_lock);
list_del(&nd->list);
@@ -591,11 +598,11 @@ static void drop_node(struct config_group *g, struct config_item *i)
static void release_node(struct config_item *i)
{
- struct node *nd = to_node(i);
+ struct dlm_node *nd = to_node(i);
kfree(nd);
}
-static struct clusters clusters_root = {
+static struct dlm_clusters clusters_root = {
.subsys = {
.su_group = {
.cg_item = {
@@ -625,7 +632,7 @@ void dlm_config_exit(void)
static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
char *buf)
{
- struct cluster *cl = to_cluster(i);
+ struct dlm_cluster *cl = to_cluster(i);
struct cluster_attribute *cla =
container_of(a, struct cluster_attribute, attr);
return cla->show ? cla->show(cl, buf) : 0;
@@ -635,7 +642,7 @@ static ssize_t store_cluster(struct config_item *i,
struct configfs_attribute *a,
const char *buf, size_t len)
{
- struct cluster *cl = to_cluster(i);
+ struct dlm_cluster *cl = to_cluster(i);
struct cluster_attribute *cla =
container_of(a, struct cluster_attribute, attr);
return cla->store ? cla->store(cl, buf, len) : -EINVAL;
@@ -644,7 +651,7 @@ static ssize_t store_cluster(struct config_item *i,
static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
char *buf)
{
- struct comm *cm = to_comm(i);
+ struct dlm_comm *cm = to_comm(i);
struct comm_attribute *cma =
container_of(a, struct comm_attribute, attr);
return cma->show ? cma->show(cm, buf) : 0;
@@ -653,29 +660,31 @@ static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
const char *buf, size_t len)
{
- struct comm *cm = to_comm(i);
+ struct dlm_comm *cm = to_comm(i);
struct comm_attribute *cma =
container_of(a, struct comm_attribute, attr);
return cma->store ? cma->store(cm, buf, len) : -EINVAL;
}
-static ssize_t comm_nodeid_read(struct comm *cm, char *buf)
+static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf)
{
return sprintf(buf, "%d\n", cm->nodeid);
}
-static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len)
+static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
+ size_t len)
{
cm->nodeid = simple_strtol(buf, NULL, 0);
return len;
}
-static ssize_t comm_local_read(struct comm *cm, char *buf)
+static ssize_t comm_local_read(struct dlm_comm *cm, char *buf)
{
return sprintf(buf, "%d\n", cm->local);
}
-static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len)
+static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
+ size_t len)
{
cm->local= simple_strtol(buf, NULL, 0);
if (cm->local && !local_comm)
@@ -683,7 +692,7 @@ static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len)
return len;
}
-static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len)
+static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
{
struct sockaddr_storage *addr;
@@ -705,7 +714,7 @@ static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len)
static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
char *buf)
{
- struct node *nd = to_node(i);
+ struct dlm_node *nd = to_node(i);
struct node_attribute *nda =
container_of(a, struct node_attribute, attr);
return nda->show ? nda->show(nd, buf) : 0;
@@ -714,29 +723,31 @@ static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
const char *buf, size_t len)
{
- struct node *nd = to_node(i);
+ struct dlm_node *nd = to_node(i);
struct node_attribute *nda =
container_of(a, struct node_attribute, attr);
return nda->store ? nda->store(nd, buf, len) : -EINVAL;
}
-static ssize_t node_nodeid_read(struct node *nd, char *buf)
+static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf)
{
return sprintf(buf, "%d\n", nd->nodeid);
}
-static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len)
+static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
+ size_t len)
{
nd->nodeid = simple_strtol(buf, NULL, 0);
return len;
}
-static ssize_t node_weight_read(struct node *nd, char *buf)
+static ssize_t node_weight_read(struct dlm_node *nd, char *buf)
{
return sprintf(buf, "%d\n", nd->weight);
}
-static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len)
+static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
+ size_t len)
{
nd->weight = simple_strtol(buf, NULL, 0);
return len;
@@ -746,7 +757,7 @@ static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len)
* Functions for the dlm to get the info that's been configured
*/
-static struct space *get_space(char *name)
+static struct dlm_space *get_space(char *name)
{
struct config_item *i;
@@ -760,15 +771,15 @@ static struct space *get_space(char *name)
return to_space(i);
}
-static void put_space(struct space *sp)
+static void put_space(struct dlm_space *sp)
{
config_item_put(&sp->group.cg_item);
}
-static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr)
+static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr)
{
struct config_item *i;
- struct comm *cm = NULL;
+ struct dlm_comm *cm = NULL;
int found = 0;
if (!comm_list)
@@ -801,7 +812,7 @@ static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr)
return cm;
}
-static void put_comm(struct comm *cm)
+static void put_comm(struct dlm_comm *cm)
{
config_item_put(&cm->item);
}
@@ -810,8 +821,8 @@ static void put_comm(struct comm *cm)
int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
int **new_out, int *new_count_out)
{
- struct space *sp;
- struct node *nd;
+ struct dlm_space *sp;
+ struct dlm_node *nd;
int i = 0, rv = 0, ids_count = 0, new_count = 0;
int *ids, *new;
@@ -874,8 +885,8 @@ int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
int dlm_node_weight(char *lsname, int nodeid)
{
- struct space *sp;
- struct node *nd;
+ struct dlm_space *sp;
+ struct dlm_node *nd;
int w = -EEXIST;
sp = get_space(lsname);
@@ -897,7 +908,7 @@ int dlm_node_weight(char *lsname, int nodeid)
int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
{
- struct comm *cm = get_comm(nodeid, NULL);
+ struct dlm_comm *cm = get_comm(nodeid, NULL);
if (!cm)
return -EEXIST;
if (!cm->addr_count)
@@ -909,7 +920,7 @@ int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
- struct comm *cm = get_comm(0, addr);
+ struct dlm_comm *cm = get_comm(0, addr);
if (!cm)
return -EEXIST;
*nodeid = cm->nodeid;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 929e48ae7591..34f14a14fb4e 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -527,8 +527,10 @@ static ssize_t device_write(struct file *file, const char __user *buf,
k32buf = (struct dlm_write_request32 *)kbuf;
kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) -
sizeof(struct dlm_write_request32)), GFP_KERNEL);
- if (!kbuf)
+ if (!kbuf) {
+ kfree(k32buf);
return -ENOMEM;
+ }
if (proc)
set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
@@ -539,8 +541,10 @@ static ssize_t device_write(struct file *file, const char __user *buf,
/* do we really need this? can a write happen after a close? */
if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
- (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags)))
- return -EINVAL;
+ (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
+ error = -EINVAL;
+ goto out_free;
+ }
sigfillset(&allsigs);
sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 0c87474f7917..7cc0eb756b55 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1041,10 +1041,7 @@ retry:
}
/*
- * It opens an eventpoll file descriptor. The "size" parameter is there
- * for historical reasons, when epoll was using an hash instead of an
- * RB tree. With the current implementation, the "size" parameter is ignored
- * (besides sanity checks).
+ * Open an eventpoll file descriptor.
*/
asmlinkage long sys_epoll_create1(int flags)
{
diff --git a/fs/inode.c b/fs/inode.c
index b6726f644530..0487ddba1397 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -166,6 +166,7 @@ static struct inode *alloc_inode(struct super_block *sb)
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
mapping->assoc_mapping = NULL;
mapping->backing_dev_info = &default_backing_dev_info;
+ mapping->writeback_index = 0;
/*
* If the block_device provides a backing_dev_info for client
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 8dee32007500..0540ca27a446 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
goto out;
}
- lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_map_acquire(&handle->h_lockdep_map);
out:
return handle;
@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
spin_unlock(&journal->j_state_lock);
}
- lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+ lock_map_release(&handle->h_lockdep_map);
jbd_free_handle(handle);
return err;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4f7cadbb19fa..e5d540588fa9 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
goto out;
}
- lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_map_acquire(&handle->h_lockdep_map);
out:
return handle;
}
@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
spin_unlock(&journal->j_state_lock);
}
- lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+ lock_map_release(&handle->h_lockdep_map);
jbd2_free_handle(handle);
return err;
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 399444639337..4a714f64515b 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -83,7 +83,7 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
{
struct nlm_host *host;
struct nlm_file *file;
- int rc = rpc_success;
+ __be32 rc = rpc_success;
dprintk("lockd: TEST4 called\n");
resp->cookie = argp->cookie;
@@ -116,7 +116,7 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
{
struct nlm_host *host;
struct nlm_file *file;
- int rc = rpc_success;
+ __be32 rc = rpc_success;
dprintk("lockd: LOCK called\n");
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 76019d2ff72d..76262c1986f2 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -112,7 +112,7 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
{
struct nlm_host *host;
struct nlm_file *file;
- int rc = rpc_success;
+ __be32 rc = rpc_success;
dprintk("lockd: TEST called\n");
resp->cookie = argp->cookie;
@@ -146,7 +146,7 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
{
struct nlm_host *host;
struct nlm_file *file;
- int rc = rpc_success;
+ __be32 rc = rpc_success;
dprintk("lockd: LOCK called\n");
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 33bfcf09db46..9dc036f18356 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1023,7 +1023,7 @@ exp_export(struct nfsctl_export *nxp)
/* Look up the dentry */
err = path_lookup(nxp->ex_path, 0, &nd);
if (err)
- goto out_unlock;
+ goto out_put_clp;
err = -EINVAL;
exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL);
@@ -1090,9 +1090,9 @@ finish:
exp_put(exp);
if (fsid_key && !IS_ERR(fsid_key))
cache_put(&fsid_key->h, &svc_expkey_cache);
- if (clp)
- auth_domain_put(clp);
path_put(&nd.path);
+out_put_clp:
+ auth_domain_put(clp);
out_unlock:
exp_writeunlock();
out:
diff --git a/fs/omfs/bitmap.c b/fs/omfs/bitmap.c
index 697663b01bae..e1c0ec0ae989 100644
--- a/fs/omfs/bitmap.c
+++ b/fs/omfs/bitmap.c
@@ -92,7 +92,7 @@ int omfs_allocate_block(struct super_block *sb, u64 block)
struct buffer_head *bh;
struct omfs_sb_info *sbi = OMFS_SB(sb);
int bits_per_entry = 8 * sb->s_blocksize;
- int map, bit;
+ unsigned int map, bit;
int ret = 0;
u64 tmp;
@@ -176,7 +176,8 @@ int omfs_clear_range(struct super_block *sb, u64 block, int count)
struct omfs_sb_info *sbi = OMFS_SB(sb);
int bits_per_entry = 8 * sb->s_blocksize;
u64 tmp;
- int map, bit, ret;
+ unsigned int map, bit;
+ int ret;
tmp = block;
bit = do_div(tmp, bits_per_entry);
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 7e2499053e4d..834b2331f6b3 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -26,6 +26,13 @@ static int omfs_sync_file(struct file *file, struct dentry *dentry,
return err ? -EIO : 0;
}
+static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset)
+{
+ return (sbi->s_sys_blocksize - offset -
+ sizeof(struct omfs_extent)) /
+ sizeof(struct omfs_extent_entry) + 1;
+}
+
void omfs_make_empty_table(struct buffer_head *bh, int offset)
{
struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset];
@@ -45,6 +52,7 @@ int omfs_shrink_inode(struct inode *inode)
struct buffer_head *bh;
u64 next, last;
u32 extent_count;
+ u32 max_extents;
int ret;
/* traverse extent table, freeing each entry that is greater
@@ -62,15 +70,18 @@ int omfs_shrink_inode(struct inode *inode)
goto out;
oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
+ max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
for (;;) {
- if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next)) {
- brelse(bh);
- goto out;
- }
+ if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
+ goto out_brelse;
extent_count = be32_to_cpu(oe->e_extent_count);
+
+ if (extent_count > max_extents)
+ goto out_brelse;
+
last = next;
next = be64_to_cpu(oe->e_next);
entry = &oe->e_entry;
@@ -98,10 +109,14 @@ int omfs_shrink_inode(struct inode *inode)
if (!bh)
goto out;
oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
+ max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
}
ret = 0;
out:
return ret;
+out_brelse:
+ brelse(bh);
+ return ret;
}
static void omfs_truncate(struct inode *inode)
@@ -154,9 +169,7 @@ static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
goto out;
}
}
- max_count = (sbi->s_sys_blocksize - OMFS_EXTENT_START -
- sizeof(struct omfs_extent)) /
- sizeof(struct omfs_extent_entry) + 1;
+ max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);
/* TODO: add a continuation block here */
if (be32_to_cpu(oe->e_extent_count) > max_count-1)
@@ -225,6 +238,7 @@ static int omfs_get_block(struct inode *inode, sector_t block,
sector_t next, offset;
int ret;
u64 new_block;
+ u32 max_extents;
int extent_count;
struct omfs_extent *oe;
struct omfs_extent_entry *entry;
@@ -238,6 +252,7 @@ static int omfs_get_block(struct inode *inode, sector_t block,
goto out;
oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
+ max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
next = inode->i_ino;
for (;;) {
@@ -249,6 +264,9 @@ static int omfs_get_block(struct inode *inode, sector_t block,
next = be64_to_cpu(oe->e_next);
entry = &oe->e_entry;
+ if (extent_count > max_extents)
+ goto out_brelse;
+
offset = find_block(inode, entry, block, extent_count, &remain);
if (offset > 0) {
ret = 0;
@@ -266,6 +284,7 @@ static int omfs_get_block(struct inode *inode, sector_t block,
if (!bh)
goto out;
oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
+ max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
}
if (create) {
ret = omfs_grow_extent(inode, oe, &new_block);
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index a95fe5984f4b..d29047b1b9b0 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -232,8 +232,7 @@ struct inode *omfs_iget(struct super_block *sb, ino_t ino)
inode->i_mode = S_IFDIR | (S_IRWXUGO & ~sbi->s_dmask);
inode->i_op = &omfs_dir_inops;
inode->i_fop = &omfs_dir_operations;
- inode->i_size = be32_to_cpu(oi->i_head.h_body_size) +
- sizeof(struct omfs_header);
+ inode->i_size = sbi->s_sys_blocksize;
inc_nlink(inode);
break;
case OMFS_FILE:
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 282a13596c70..d318c7e663fa 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -27,7 +27,6 @@
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/namei.h>
-#include <linux/quotaops.h>
struct file_system_type reiserfs_fs_type;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 3f54dbd6c49b..5d54205e486b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -443,6 +443,20 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
return -1;
}
+int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits)
+{
+ size_t len = bitmap_scnprintf_len(nr_bits);
+
+ if (m->count + len < m->size) {
+ bitmap_scnprintf(m->buf + m->count, m->size - m->count,
+ bits, nr_bits);
+ m->count += len;
+ return 0;
+ }
+ m->count = m->size;
+ return -1;
+}
+
static void *single_start(struct seq_file *p, loff_t *pos)
{
return NULL + (*pos == 0);
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index d81fb9ed2b8e..154098157473 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -263,8 +263,8 @@ int ubifs_calc_min_idx_lebs(struct ubifs_info *c)
idx_size = c->old_idx_sz + c->budg_idx_growth + c->budg_uncommitted_idx;
- /* And make sure we have twice the index size of space reserved */
- idx_size <<= 1;
+ /* And make sure we have thrice the index size of space reserved */
+ idx_size = idx_size + (idx_size << 1);
/*
* We do not maintain 'old_idx_size' as 'old_idx_lebs'/'old_idx_bytes'
@@ -388,11 +388,11 @@ static int can_use_rp(struct ubifs_info *c)
* This function makes sure UBIFS has enough free eraseblocks for index growth
* and data.
*
- * When budgeting index space, UBIFS reserves twice as more LEBs as the index
+ * When budgeting index space, UBIFS reserves thrice as many LEBs as the index
* would take if it was consolidated and written to the flash. This guarantees
* that the "in-the-gaps" commit method always succeeds and UBIFS will always
* be able to commit dirty index. So this function basically adds amount of
- * budgeted index space to the size of the current index, multiplies this by 2,
+ * budgeted index space to the size of the current index, multiplies this by 3,
* and makes sure this does not exceed the amount of free eraseblocks.
*
* Notes about @c->min_idx_lebs and @c->lst.idx_lebs variables:
@@ -543,8 +543,16 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
int err, idx_growth, data_growth, dd_growth;
struct retries_info ri;
+ ubifs_assert(req->new_page <= 1);
+ ubifs_assert(req->dirtied_page <= 1);
+ ubifs_assert(req->new_dent <= 1);
+ ubifs_assert(req->mod_dent <= 1);
+ ubifs_assert(req->new_ino <= 1);
+ ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA);
ubifs_assert(req->dirtied_ino <= 4);
ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
+ ubifs_assert(!(req->new_ino_d & 7));
+ ubifs_assert(!(req->dirtied_ino_d & 7));
data_growth = calc_data_growth(c, req);
dd_growth = calc_dd_growth(c, req);
@@ -618,8 +626,16 @@ again:
*/
void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
{
+ ubifs_assert(req->new_page <= 1);
+ ubifs_assert(req->dirtied_page <= 1);
+ ubifs_assert(req->new_dent <= 1);
+ ubifs_assert(req->mod_dent <= 1);
+ ubifs_assert(req->new_ino <= 1);
+ ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA);
ubifs_assert(req->dirtied_ino <= 4);
ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
+ ubifs_assert(!(req->new_ino_d & 7));
+ ubifs_assert(!(req->dirtied_ino_d & 7));
if (!req->recalculate) {
ubifs_assert(req->idx_growth >= 0);
ubifs_assert(req->data_growth >= 0);
@@ -647,7 +663,11 @@ void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
ubifs_assert(c->budg_idx_growth >= 0);
ubifs_assert(c->budg_data_growth >= 0);
+ ubifs_assert(c->budg_dd_growth >= 0);
ubifs_assert(c->min_idx_lebs < c->main_lebs);
+ ubifs_assert(!(c->budg_idx_growth & 7));
+ ubifs_assert(!(c->budg_data_growth & 7));
+ ubifs_assert(!(c->budg_dd_growth & 7));
spin_unlock(&c->space_lock);
}
@@ -686,9 +706,10 @@ void ubifs_convert_page_budget(struct ubifs_info *c)
void ubifs_release_dirty_inode_budget(struct ubifs_info *c,
struct ubifs_inode *ui)
{
- struct ubifs_budget_req req = {.dd_growth = c->inode_budget,
- .dirtied_ino_d = ui->data_len};
+ struct ubifs_budget_req req;
+ memset(&req, 0, sizeof(struct ubifs_budget_req));
+ req.dd_growth = c->inode_budget + ALIGN(ui->data_len, 8);
ubifs_release_budget(c, &req);
}
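The budget.c hunks above switch the index reservation from double to triple the consolidated index size, written as idx_size + (idx_size << 1) instead of a multiply. A minimal standalone sketch of that arithmetic (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Triple a 64-bit index size with shift-and-add, mirroring the
 * "idx_size = idx_size + (idx_size << 1)" line in the hunk above. */
static uint64_t triple_idx_size(uint64_t idx_size)
{
	return idx_size + (idx_size << 1);	/* equals idx_size * 3 */
}

int main(void)
{
	assert(triple_idx_size(0) == 0);
	assert(triple_idx_size(7) == 21);
	assert(triple_idx_size(1ULL << 40) == 3ULL << 40);
	return 0;
}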
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 3b516316c9b3..0a6aa2cc78f0 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -74,6 +74,7 @@ static int do_commit(struct ubifs_info *c)
goto out_up;
}
+ c->cmt_no += 1;
err = ubifs_gc_start_commit(c);
if (err)
goto out_up;
@@ -115,7 +116,7 @@ static int do_commit(struct ubifs_info *c)
goto out;
mutex_lock(&c->mst_mutex);
- c->mst_node->cmt_no = cpu_to_le64(++c->cmt_no);
+ c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
c->mst_node->root_offs = cpu_to_le32(zroot.offs);
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 4e3aaeba4eca..b9cb77473758 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -568,8 +568,8 @@ void dbg_dump_budget_req(const struct ubifs_budget_req *req)
void dbg_dump_lstats(const struct ubifs_lp_stats *lst)
{
spin_lock(&dbg_lock);
- printk(KERN_DEBUG "Lprops statistics: empty_lebs %d, idx_lebs %d\n",
- lst->empty_lebs, lst->idx_lebs);
+ printk(KERN_DEBUG "(pid %d) Lprops statistics: empty_lebs %d, "
+ "idx_lebs %d\n", current->pid, lst->empty_lebs, lst->idx_lebs);
printk(KERN_DEBUG "\ttaken_empty_lebs %d, total_free %lld, "
"total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free,
lst->total_dirty);
@@ -587,8 +587,8 @@ void dbg_dump_budg(struct ubifs_info *c)
struct ubifs_gced_idx_leb *idx_gc;
spin_lock(&dbg_lock);
- printk(KERN_DEBUG "Budgeting info: budg_data_growth %lld, "
- "budg_dd_growth %lld, budg_idx_growth %lld\n",
+ printk(KERN_DEBUG "(pid %d) Budgeting info: budg_data_growth %lld, "
+ "budg_dd_growth %lld, budg_idx_growth %lld\n", current->pid,
c->budg_data_growth, c->budg_dd_growth, c->budg_idx_growth);
printk(KERN_DEBUG "\tdata budget sum %lld, total budget sum %lld, "
"freeable_cnt %d\n", c->budg_data_growth + c->budg_dd_growth,
@@ -634,7 +634,7 @@ void dbg_dump_lprops(struct ubifs_info *c)
struct ubifs_lprops lp;
struct ubifs_lp_stats lst;
- printk(KERN_DEBUG "Dumping LEB properties\n");
+ printk(KERN_DEBUG "(pid %d) Dumping LEB properties\n", current->pid);
ubifs_get_lp_stats(c, &lst);
dbg_dump_lstats(&lst);
@@ -655,7 +655,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum)
if (dbg_failure_mode)
return;
- printk(KERN_DEBUG "Dumping LEB %d\n", lnum);
+ printk(KERN_DEBUG "(pid %d) Dumping LEB %d\n", current->pid, lnum);
sleb = ubifs_scan(c, lnum, 0, c->dbg_buf);
if (IS_ERR(sleb)) {
@@ -720,8 +720,8 @@ void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
{
int i;
- printk(KERN_DEBUG "Dumping heap cat %d (%d elements)\n",
- cat, heap->cnt);
+ printk(KERN_DEBUG "(pid %d) Dumping heap cat %d (%d elements)\n",
+ current->pid, cat, heap->cnt);
for (i = 0; i < heap->cnt; i++) {
struct ubifs_lprops *lprops = heap->arr[i];
@@ -736,7 +736,7 @@ void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
{
int i;
- printk(KERN_DEBUG "Dumping pnode:\n");
+ printk(KERN_DEBUG "(pid %d) Dumping pnode:\n", current->pid);
printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n",
(size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n",
@@ -755,7 +755,7 @@ void dbg_dump_tnc(struct ubifs_info *c)
int level;
printk(KERN_DEBUG "\n");
- printk(KERN_DEBUG "Dumping the TNC tree\n");
+ printk(KERN_DEBUG "(pid %d) Dumping the TNC tree\n", current->pid);
znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
level = znode->level;
printk(KERN_DEBUG "== Level %d ==\n", level);
@@ -2208,16 +2208,17 @@ int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
int offset, int len, int dtype)
{
- int err;
+ int err, failing;
if (in_failure_mode(desc))
return -EIO;
- if (do_fail(desc, lnum, 1))
+ failing = do_fail(desc, lnum, 1);
+ if (failing)
cut_data(buf, len);
err = ubi_leb_write(desc, lnum, buf, offset, len, dtype);
if (err)
return err;
- if (in_failure_mode(desc))
+ if (failing)
return -EIO;
return 0;
}
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 3c4f1e93c9e0..50315fc57185 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -27,7 +27,7 @@
#define UBIFS_DBG(op) op
-#define ubifs_assert(expr) do { \
+#define ubifs_assert(expr) do { \
if (unlikely(!(expr))) { \
printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \
__func__, __LINE__, current->pid); \
@@ -73,50 +73,50 @@ const char *dbg_key_str1(const struct ubifs_info *c,
const union ubifs_key *key);
/*
- * DBGKEY macros require dbg_lock to be held, which it is in the dbg message
+ * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
* macros.
*/
#define DBGKEY(key) dbg_key_str0(c, (key))
#define DBGKEY1(key) dbg_key_str1(c, (key))
/* General messages */
-#define dbg_gen(fmt, ...) dbg_do_msg(UBIFS_MSG_GEN, fmt, ##__VA_ARGS__)
+#define dbg_gen(fmt, ...) dbg_do_msg(UBIFS_MSG_GEN, fmt, ##__VA_ARGS__)
/* Additional journal messages */
-#define dbg_jnl(fmt, ...) dbg_do_msg(UBIFS_MSG_JNL, fmt, ##__VA_ARGS__)
+#define dbg_jnl(fmt, ...) dbg_do_msg(UBIFS_MSG_JNL, fmt, ##__VA_ARGS__)
/* Additional TNC messages */
-#define dbg_tnc(fmt, ...) dbg_do_msg(UBIFS_MSG_TNC, fmt, ##__VA_ARGS__)
+#define dbg_tnc(fmt, ...) dbg_do_msg(UBIFS_MSG_TNC, fmt, ##__VA_ARGS__)
/* Additional lprops messages */
-#define dbg_lp(fmt, ...) dbg_do_msg(UBIFS_MSG_LP, fmt, ##__VA_ARGS__)
+#define dbg_lp(fmt, ...) dbg_do_msg(UBIFS_MSG_LP, fmt, ##__VA_ARGS__)
/* Additional LEB find messages */
-#define dbg_find(fmt, ...) dbg_do_msg(UBIFS_MSG_FIND, fmt, ##__VA_ARGS__)
+#define dbg_find(fmt, ...) dbg_do_msg(UBIFS_MSG_FIND, fmt, ##__VA_ARGS__)
/* Additional mount messages */
-#define dbg_mnt(fmt, ...) dbg_do_msg(UBIFS_MSG_MNT, fmt, ##__VA_ARGS__)
+#define dbg_mnt(fmt, ...) dbg_do_msg(UBIFS_MSG_MNT, fmt, ##__VA_ARGS__)
/* Additional I/O messages */
-#define dbg_io(fmt, ...) dbg_do_msg(UBIFS_MSG_IO, fmt, ##__VA_ARGS__)
+#define dbg_io(fmt, ...) dbg_do_msg(UBIFS_MSG_IO, fmt, ##__VA_ARGS__)
/* Additional commit messages */
-#define dbg_cmt(fmt, ...) dbg_do_msg(UBIFS_MSG_CMT, fmt, ##__VA_ARGS__)
+#define dbg_cmt(fmt, ...) dbg_do_msg(UBIFS_MSG_CMT, fmt, ##__VA_ARGS__)
/* Additional budgeting messages */
-#define dbg_budg(fmt, ...) dbg_do_msg(UBIFS_MSG_BUDG, fmt, ##__VA_ARGS__)
+#define dbg_budg(fmt, ...) dbg_do_msg(UBIFS_MSG_BUDG, fmt, ##__VA_ARGS__)
/* Additional log messages */
-#define dbg_log(fmt, ...) dbg_do_msg(UBIFS_MSG_LOG, fmt, ##__VA_ARGS__)
+#define dbg_log(fmt, ...) dbg_do_msg(UBIFS_MSG_LOG, fmt, ##__VA_ARGS__)
/* Additional gc messages */
-#define dbg_gc(fmt, ...) dbg_do_msg(UBIFS_MSG_GC, fmt, ##__VA_ARGS__)
+#define dbg_gc(fmt, ...) dbg_do_msg(UBIFS_MSG_GC, fmt, ##__VA_ARGS__)
/* Additional scan messages */
-#define dbg_scan(fmt, ...) dbg_do_msg(UBIFS_MSG_SCAN, fmt, ##__VA_ARGS__)
+#define dbg_scan(fmt, ...) dbg_do_msg(UBIFS_MSG_SCAN, fmt, ##__VA_ARGS__)
/* Additional recovery messages */
-#define dbg_rcvry(fmt, ...) dbg_do_msg(UBIFS_MSG_RCVRY, fmt, ##__VA_ARGS__)
+#define dbg_rcvry(fmt, ...) dbg_do_msg(UBIFS_MSG_RCVRY, fmt, ##__VA_ARGS__)
/*
* Debugging message type flags (must match msg_type_names in debug.c).
@@ -239,34 +239,23 @@ typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
struct ubifs_zbranch *zbr, void *priv);
typedef int (*dbg_znode_callback)(struct ubifs_info *c,
struct ubifs_znode *znode, void *priv);
-
int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
dbg_znode_callback znode_cb, void *priv);
/* Checking functions */
int dbg_check_lprops(struct ubifs_info *c);
-
int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot);
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot);
-
int dbg_check_cats(struct ubifs_info *c);
-
int dbg_check_ltab(struct ubifs_info *c);
-
int dbg_check_synced_i_size(struct inode *inode);
-
int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir);
-
int dbg_check_tnc(struct ubifs_info *c, int extra);
-
int dbg_check_idx_size(struct ubifs_info *c, long long idx_size);
-
int dbg_check_filesystem(struct ubifs_info *c);
-
void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
int add_pos);
-
int dbg_check_lprops(struct ubifs_info *c);
int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
int row, int col);
@@ -329,71 +318,77 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum,
#else /* !CONFIG_UBIFS_FS_DEBUG */
#define UBIFS_DBG(op)
-#define ubifs_assert(expr) ({})
-#define ubifs_assert_cmt_locked(c)
+
+/* Use "if (0)" to make compiler check arguments even if debugging is off */
+#define ubifs_assert(expr) do { \
+ if (0 && (expr)) \
+ printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \
+ __func__, __LINE__, current->pid); \
+} while (0)
+
+#define dbg_err(fmt, ...) do { \
+ if (0) \
+ ubifs_err(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define dbg_msg(fmt, ...) do { \
+ if (0) \
+ printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", \
+ current->pid, __func__, ##__VA_ARGS__); \
+} while (0)
+
#define dbg_dump_stack()
-#define dbg_err(fmt, ...) ({})
-#define dbg_msg(fmt, ...) ({})
-#define dbg_key(c, key, fmt, ...) ({})
-
-#define dbg_gen(fmt, ...) ({})
-#define dbg_jnl(fmt, ...) ({})
-#define dbg_tnc(fmt, ...) ({})
-#define dbg_lp(fmt, ...) ({})
-#define dbg_find(fmt, ...) ({})
-#define dbg_mnt(fmt, ...) ({})
-#define dbg_io(fmt, ...) ({})
-#define dbg_cmt(fmt, ...) ({})
-#define dbg_budg(fmt, ...) ({})
-#define dbg_log(fmt, ...) ({})
-#define dbg_gc(fmt, ...) ({})
-#define dbg_scan(fmt, ...) ({})
-#define dbg_rcvry(fmt, ...) ({})
-
-#define dbg_ntype(type) ""
-#define dbg_cstate(cmt_state) ""
-#define dbg_get_key_dump(c, key) ({})
-#define dbg_dump_inode(c, inode) ({})
-#define dbg_dump_node(c, node) ({})
-#define dbg_dump_budget_req(req) ({})
-#define dbg_dump_lstats(lst) ({})
-#define dbg_dump_budg(c) ({})
-#define dbg_dump_lprop(c, lp) ({})
-#define dbg_dump_lprops(c) ({})
-#define dbg_dump_leb(c, lnum) ({})
-#define dbg_dump_znode(c, znode) ({})
-#define dbg_dump_heap(c, heap, cat) ({})
-#define dbg_dump_pnode(c, pnode, parent, iip) ({})
-#define dbg_dump_tnc(c) ({})
-#define dbg_dump_index(c) ({})
+#define ubifs_assert_cmt_locked(c)
-#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
+#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_jnl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_tnc(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_lp(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_find(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_mnt(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_cmt(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_budg(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_log(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_gc(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_scan(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_rcvry(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+
+#define DBGKEY(key) ((char *)(key))
+#define DBGKEY1(key) ((char *)(key))
+
+#define dbg_ntype(type) ""
+#define dbg_cstate(cmt_state) ""
+#define dbg_get_key_dump(c, key) ({})
+#define dbg_dump_inode(c, inode) ({})
+#define dbg_dump_node(c, node) ({})
+#define dbg_dump_budget_req(req) ({})
+#define dbg_dump_lstats(lst) ({})
+#define dbg_dump_budg(c) ({})
+#define dbg_dump_lprop(c, lp) ({})
+#define dbg_dump_lprops(c) ({})
+#define dbg_dump_leb(c, lnum) ({})
+#define dbg_dump_znode(c, znode) ({})
+#define dbg_dump_heap(c, heap, cat) ({})
+#define dbg_dump_pnode(c, pnode, parent, iip) ({})
+#define dbg_dump_tnc(c) ({})
+#define dbg_dump_index(c) ({})
+#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
#define dbg_old_index_check_init(c, zroot) 0
#define dbg_check_old_index(c, zroot) 0
-
#define dbg_check_cats(c) 0
-
#define dbg_check_ltab(c) 0
-
#define dbg_check_synced_i_size(inode) 0
-
#define dbg_check_dir_size(c, dir) 0
-
#define dbg_check_tnc(c, x) 0
-
#define dbg_check_idx_size(c, idx_size) 0
-
#define dbg_check_filesystem(c) 0
-
#define dbg_check_heap(c, heap, cat, add_pos) ({})
-
#define dbg_check_lprops(c) 0
#define dbg_check_lpt_nodes(c, cnode, row, col) 0
-
#define dbg_force_in_the_gaps_enabled 0
#define dbg_force_in_the_gaps() 0
-
#define dbg_failure_mode 0
#define dbg_failure_mode_registration(c) ({})
#define dbg_failure_mode_deregistration(c) ({})
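The !CONFIG_UBIFS_FS_DEBUG stubs above replace empty macros with "if (0)" wrappers so that format strings and arguments are still type-checked even when debugging is compiled out. A small self-contained sketch of that trick, using hypothetical dbg_* names rather than anything from the patch (the ##__VA_ARGS__ form is the same gcc extension the kernel macros use):

#include <stdio.h>

/* Old style: expands to nothing, so broken arguments go unnoticed. */
#define dbg_off(fmt, ...) do { } while (0)

/* New style: the call is compiled (and format-checked) but the "if (0)"
 * lets the compiler discard it, so no code or output is emitted. */
#define dbg_checked(fmt, ...) do { \
	if (0) \
		printf(fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	int pid = 42;

	dbg_off("pid %s\n", pid);	/* wrong specifier, compiles silently */
	dbg_checked("pid %d\n", pid);	/* arguments checked, nothing printed */
	return 0;
}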
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index e90374be7d3b..5c96f1fb7016 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -165,7 +165,6 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
}
inode->i_ino = ++c->highest_inum;
- inode->i_generation = ++c->vfs_gen;
/*
* The creation sequence number remains with this inode for its
* lifetime. All nodes for this inode have a greater sequence number,
@@ -220,15 +219,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
err = ubifs_tnc_lookup_nm(c, &key, dent, &dentry->d_name);
if (err) {
- /*
- * Do not hash the direntry if parent 'i_nlink' is zero, because
- * this has side-effects - '->delete_inode()' call will not be
- * called for the parent orphan inode, because 'd_count' of its
- * direntry will stay 1 (it'll be negative direntry I guess)
- * and prevent 'iput_final()' until the dentry is destroyed due
- * to unmount or memory pressure.
- */
- if (err == -ENOENT && dir->i_nlink != 0) {
+ if (err == -ENOENT) {
dbg_gen("not found");
goto done;
}
@@ -525,7 +516,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
struct ubifs_inode *dir_ui = ubifs_inode(dir);
int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
struct ubifs_budget_req req = { .new_dent = 1, .dirtied_ino = 2,
- .dirtied_ino_d = ui->data_len };
+ .dirtied_ino_d = ALIGN(ui->data_len, 8) };
/*
* Budget request settings: new direntry, changing the target inode,
@@ -727,8 +718,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
struct ubifs_inode *dir_ui = ubifs_inode(dir);
struct ubifs_info *c = dir->i_sb->s_fs_info;
int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
- struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
- .dirtied_ino_d = 1 };
+ struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 };
/*
* Budget request settings: new inode, new direntry and changing parent
@@ -789,7 +779,8 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
int err, devlen = 0;
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
- .new_ino_d = devlen, .dirtied_ino = 1 };
+ .new_ino_d = ALIGN(devlen, 8),
+ .dirtied_ino = 1 };
/*
* Budget request settings: new inode, new direntry and changing parent
@@ -863,7 +854,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
int err, len = strlen(symname);
int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
- .new_ino_d = len, .dirtied_ino = 1 };
+ .new_ino_d = ALIGN(len, 8),
+ .dirtied_ino = 1 };
/*
* Budget request settings: new inode, new direntry and changing parent
@@ -1012,7 +1004,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ubifs_budget_req req = { .new_dent = 1, .mod_dent = 1,
.dirtied_ino = 3 };
struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
- .dirtied_ino_d = old_inode_ui->data_len };
+ .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
struct timespec time;
/*
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 8565e586e533..4071d1cae29f 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -890,7 +890,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
loff_t new_size = attr->ia_size;
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_budget_req req = { .dirtied_ino = 1,
- .dirtied_ino_d = ui->data_len };
+ .dirtied_ino_d = ALIGN(ui->data_len, 8) };
err = ubifs_budget_space(c, &req);
if (err)
@@ -941,7 +941,8 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
struct inode *inode = dentry->d_inode;
struct ubifs_info *c = inode->i_sb->s_fs_info;
- dbg_gen("ino %lu, ia_valid %#x", inode->i_ino, attr->ia_valid);
+ dbg_gen("ino %lu, mode %#x, ia_valid %#x",
+ inode->i_ino, inode->i_mode, attr->ia_valid);
err = inode_change_ok(inode, attr);
if (err)
return err;
@@ -1051,7 +1052,7 @@ static int update_mctime(struct ubifs_info *c, struct inode *inode)
if (mctime_update_needed(inode, &now)) {
int err, release;
struct ubifs_budget_req req = { .dirtied_ino = 1,
- .dirtied_ino_d = ui->data_len };
+ .dirtied_ino_d = ALIGN(ui->data_len, 8) };
err = ubifs_budget_space(c, &req);
if (err)
@@ -1270,6 +1271,7 @@ struct file_operations ubifs_file_operations = {
.fsync = ubifs_fsync,
.unlocked_ioctl = ubifs_ioctl,
.splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
#ifdef CONFIG_COMPAT
.compat_ioctl = ubifs_compat_ioctl,
#endif
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 10394c548367..adee7b5ddeab 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -290,9 +290,14 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
idx_lp = idx_heap->arr[0];
sum = idx_lp->free + idx_lp->dirty;
/*
- * Since we reserve twice as more space for the index than it
+ * Since we reserve thrice as much space for the index as it
* actually takes, it does not make sense to pick indexing LEBs
- * with less than half LEB of dirty space.
+ * with less than, say, half a LEB of dirty space. Maybe half is
+ * not the optimal boundary - this should be tested and
+ * checked. This boundary determines how much we use the
+ * in-the-gaps method to consolidate the index compared to how
+ * much we use the garbage collector to consolidate it. The
+ * "half" criterion simply feels right.
*/
if (sum < min_space || sum < c->half_leb_size)
idx_lp = NULL;
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 3374f91b6709..054363f2b207 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -54,6 +54,20 @@
#include "ubifs.h"
/**
+ * ubifs_ro_mode - switch UBIFS to read-only mode.
+ * @c: UBIFS file-system description object
+ * @err: error code which is the reason of switching to R/O mode
+ */
+void ubifs_ro_mode(struct ubifs_info *c, int err)
+{
+ if (!c->ro_media) {
+ c->ro_media = 1;
+ ubifs_warn("switched to read-only mode, error %d", err);
+ dbg_dump_stack();
+ }
+}
+
+/**
* ubifs_check_node - check node.
* @c: UBIFS file-system description object
* @buf: node to check
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 283155abe5f5..22993f867d19 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -447,13 +447,11 @@ static int get_dent_type(int mode)
* @ino: buffer in which to pack inode node
* @inode: inode to pack
* @last: indicates the last node of the group
- * @last_reference: non-zero if this is a deletion inode
*/
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
- const struct inode *inode, int last,
- int last_reference)
+ const struct inode *inode, int last)
{
- int data_len = 0;
+ int data_len = 0, last_reference = !inode->i_nlink;
struct ubifs_inode *ui = ubifs_inode(inode);
ino->ch.node_type = UBIFS_INO_NODE;
@@ -596,9 +594,9 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
ubifs_prep_grp_node(c, dent, dlen, 0);
ino = (void *)dent + aligned_dlen;
- pack_inode(c, ino, inode, 0, last_reference);
+ pack_inode(c, ino, inode, 0);
ino = (void *)ino + aligned_ilen;
- pack_inode(c, ino, dir, 1, 0);
+ pack_inode(c, ino, dir, 1);
if (last_reference) {
err = ubifs_add_orphan(c, inode->i_ino);
@@ -606,6 +604,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
release_head(c, BASEHD);
goto out_finish;
}
+ ui->del_cmtno = c->cmt_no;
}
err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
@@ -750,30 +749,25 @@ out_free:
* ubifs_jnl_write_inode - flush inode to the journal.
* @c: UBIFS file-system description object
* @inode: inode to flush
- * @deletion: inode has been deleted
*
* This function writes inode @inode to the journal. If the inode is
* synchronous, it also synchronizes the write-buffer. Returns zero in case of
* success and a negative error code in case of failure.
*/
-int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode,
- int deletion)
+int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
- int err, len, lnum, offs, sync = 0;
+ int err, lnum, offs;
struct ubifs_ino_node *ino;
struct ubifs_inode *ui = ubifs_inode(inode);
+ int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;
- dbg_jnl("ino %lu%s", inode->i_ino,
- deletion ? " (last reference)" : "");
- if (deletion)
- ubifs_assert(inode->i_nlink == 0);
+ dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);
- len = UBIFS_INO_NODE_SZ;
/*
* If the inode is being deleted, do not write the attached data. No
* need to synchronize the write-buffer either.
*/
- if (!deletion) {
+ if (!last_reference) {
len += ui->data_len;
sync = IS_SYNC(inode);
}
@@ -786,7 +780,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode,
if (err)
goto out_free;
- pack_inode(c, ino, inode, 1, deletion);
+ pack_inode(c, ino, inode, 1);
err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
if (err)
goto out_release;
@@ -795,7 +789,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode,
inode->i_ino);
release_head(c, BASEHD);
- if (deletion) {
+ if (last_reference) {
err = ubifs_tnc_remove_ino(c, inode->i_ino);
if (err)
goto out_ro;
@@ -828,6 +822,65 @@ out_free:
}
/**
+ * ubifs_jnl_delete_inode - delete an inode.
+ * @c: UBIFS file-system description object
+ * @inode: inode to delete
+ *
+ * This function deletes inode @inode which includes removing it from orphans,
+ * deleting it from TNC and, in some cases, writing a deletion inode to the
+ * journal.
+ *
+ * When regular file inodes are unlinked or a directory inode is removed, the
+ * 'ubifs_jnl_update()' function writes a corresponding deletion inode and
+ * direntry to the media, and adds the inode to orphans. After this, when the
+ * last reference to this inode has been dropped, this function is called. In
+ * general, it has to write one more deletion inode to the media, because if
+ * a commit happened between 'ubifs_jnl_update()' and
+ * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal
+ * anymore, and in fact it might not be on the flash anymore, because it might
+ * have been garbage-collected already. And for optimization reasons UBIFS does
+ * not read the orphan area if it has been unmounted cleanly, so it would have
+ * no indication in the journal that there is a deleted inode which has to be
+ * removed from TNC.
+ *
+ * However, if there was no commit between 'ubifs_jnl_update()' and
+ * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion
+ * inode to the media for the second time. And this is quite a typical case.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
+{
+ int err;
+ struct ubifs_inode *ui = ubifs_inode(inode);
+
+ ubifs_assert(inode->i_nlink == 0);
+
+ if (ui->del_cmtno != c->cmt_no)
+ /* A commit happened for sure */
+ return ubifs_jnl_write_inode(c, inode);
+
+ down_read(&c->commit_sem);
+ /*
+ * Check commit number again, because the first test has been done
+ * without @c->commit_sem, so a commit might have happened.
+ */
+ if (ui->del_cmtno != c->cmt_no) {
+ up_read(&c->commit_sem);
+ return ubifs_jnl_write_inode(c, inode);
+ }
+
+ err = ubifs_tnc_remove_ino(c, inode->i_ino);
+ if (err)
+ ubifs_ro_mode(c, err);
+ else
+ ubifs_delete_orphan(c, inode->i_ino);
+ up_read(&c->commit_sem);
+ return err;
+}
+
+/**
* ubifs_jnl_rename - rename a directory entry.
* @c: UBIFS file-system description object
* @old_dir: parent inode of directory entry to rename
@@ -917,16 +970,16 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
p = (void *)dent2 + aligned_dlen2;
if (new_inode) {
- pack_inode(c, p, new_inode, 0, last_reference);
+ pack_inode(c, p, new_inode, 0);
p += ALIGN(ilen, 8);
}
if (!move)
- pack_inode(c, p, old_dir, 1, 0);
+ pack_inode(c, p, old_dir, 1);
else {
- pack_inode(c, p, old_dir, 0, 0);
+ pack_inode(c, p, old_dir, 0);
p += ALIGN(plen, 8);
- pack_inode(c, p, new_dir, 1, 0);
+ pack_inode(c, p, new_dir, 1);
}
if (last_reference) {
@@ -935,6 +988,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
release_head(c, BASEHD);
goto out_finish;
}
+ new_ui->del_cmtno = c->cmt_no;
}
err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
@@ -1131,7 +1185,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
if (err)
goto out_free;
- pack_inode(c, ino, inode, 0, 0);
+ pack_inode(c, ino, inode, 0);
ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
if (dlen)
ubifs_prep_grp_node(c, dn, dlen, 1);
@@ -1251,9 +1305,9 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
ubifs_prep_grp_node(c, xent, xlen, 0);
ino = (void *)xent + aligned_xlen;
- pack_inode(c, ino, inode, 0, 1);
+ pack_inode(c, ino, inode, 0);
ino = (void *)ino + UBIFS_INO_NODE_SZ;
- pack_inode(c, ino, host, 1, 0);
+ pack_inode(c, ino, host, 1);
err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
if (!sync && !err)
@@ -1320,7 +1374,7 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
const struct inode *host)
{
int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
- struct ubifs_inode *host_ui = ubifs_inode(inode);
+ struct ubifs_inode *host_ui = ubifs_inode(host);
struct ubifs_ino_node *ino;
union ubifs_key key;
int sync = IS_DIRSYNC(host);
@@ -1344,8 +1398,8 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
if (err)
goto out_free;
- pack_inode(c, ino, host, 0, 0);
- pack_inode(c, (void *)ino + aligned_len1, inode, 1, 0);
+ pack_inode(c, ino, host, 0);
+ pack_inode(c, (void *)ino + aligned_len1, inode, 1);
err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
if (!sync && !err) {
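The fast-path/slow-path split in the new 'ubifs_jnl_delete_inode()' above follows a common
check / lock / re-check idiom: @ui->del_cmtno is compared with @c->cmt_no once without
@c->commit_sem to keep the common case cheap, then compared again under the semaphore,
because a commit may have completed in between. The stand-alone user-space sketch below
(hypothetical names, a pthreads rwlock standing in for the kernel's rw_semaphore)
illustrates only that idiom, not UBIFS itself:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for @c->cmt_no and @ui->del_cmtno. */
struct fs_state {
        unsigned long long cmt_no;      /* protected by commit_lock */
        pthread_rwlock_t commit_lock;
};

static int slow_path(void) { puts("commit happened: write deletion inode"); return 0; }
static int fast_path(void) { puts("no commit yet: drop from TNC directly"); return 0; }

static int delete_inode(struct fs_state *c, unsigned long long del_cmtno)
{
        int err;

        /* Optimistic check without the lock: if a commit has clearly
         * happened since the inode was marked deleted, take the slow path. */
        if (del_cmtno != c->cmt_no)
                return slow_path();

        pthread_rwlock_rdlock(&c->commit_lock);
        /* Re-check under the lock - a commit may have finished meanwhile. */
        if (del_cmtno != c->cmt_no) {
                pthread_rwlock_unlock(&c->commit_lock);
                return slow_path();
        }
        err = fast_path();      /* no commit can start while the lock is held */
        pthread_rwlock_unlock(&c->commit_lock);
        return err;
}

int main(void)
{
        struct fs_state c = { .cmt_no = 7 };

        pthread_rwlock_init(&c.commit_lock, NULL);
        delete_inode(&c, 7);    /* fast path */
        delete_inode(&c, 6);    /* slow path */
        pthread_rwlock_destroy(&c.commit_lock);
        return 0;
}

Holding the lock across the fast path means no commit can begin until the TNC removal and
orphan deletion have finished, which appears to be what the real function relies on.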
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index 36857b9ed59e..3e0aa7367556 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -317,6 +317,8 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
return 0;
out_unlock:
+ if (err != -EAGAIN)
+ ubifs_ro_mode(c, err);
mutex_unlock(&c->log_mutex);
kfree(ref);
kfree(bud);
@@ -410,7 +412,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
return -ENOMEM;
cs->ch.node_type = UBIFS_CS_NODE;
- cs->cmt_no = cpu_to_le64(c->cmt_no + 1);
+ cs->cmt_no = cpu_to_le64(c->cmt_no);
ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);
/*
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index 4beccfc256d2..87dabf9fe742 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -80,20 +80,6 @@ static inline struct ubifs_inode *ubifs_inode(const struct inode *inode)
}
/**
- * ubifs_ro_mode - switch UBIFS to read read-only mode.
- * @c: UBIFS file-system description object
- * @err: error code which is the reason of switching to R/O mode
- */
-static inline void ubifs_ro_mode(struct ubifs_info *c, int err)
-{
- if (!c->ro_media) {
- c->ro_media = 1;
- ubifs_warn("switched to read-only mode, error %d", err);
- dbg_dump_stack();
- }
-}
-
-/**
* ubifs_compr_present - check if compressor was compiled in.
* @compr_type: compressor type to check
*
@@ -322,7 +308,7 @@ static inline long long ubifs_reported_space(const struct ubifs_info *c,
{
int divisor, factor;
- divisor = UBIFS_MAX_DATA_NODE_SZ + (c->max_idx_node_sz << 1);
+ divisor = UBIFS_MAX_DATA_NODE_SZ + (c->max_idx_node_sz * 3);
factor = UBIFS_MAX_DATA_NODE_SZ - UBIFS_DATA_NODE_SZ;
do_div(free, divisor);
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 3afeb9242c6a..02d3462f4d3e 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -310,10 +310,10 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
c->cmt_orphans -= cnt;
spin_unlock(&c->orphan_lock);
if (c->cmt_orphans)
- orph->cmt_no = cpu_to_le64(c->cmt_no + 1);
+ orph->cmt_no = cpu_to_le64(c->cmt_no);
else
/* Mark the last node of the commit */
- orph->cmt_no = cpu_to_le64((c->cmt_no + 1) | (1ULL << 63));
+ orph->cmt_no = cpu_to_le64((c->cmt_no) | (1ULL << 63));
ubifs_assert(c->ohead_offs + len <= c->leb_size);
ubifs_assert(c->ohead_lnum >= c->orph_first);
ubifs_assert(c->ohead_lnum <= c->orph_last);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index ca1e2d4e03cc..f71e6b8822c4 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ctype.h>
-#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
@@ -149,7 +148,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
if (err)
goto out_invalid;
- /* Disable readahead */
+ /* Disable read-ahead */
inode->i_mapping->backing_dev_info = &c->bdi;
switch (inode->i_mode & S_IFMT) {
@@ -278,7 +277,7 @@ static void ubifs_destroy_inode(struct inode *inode)
*/
static int ubifs_write_inode(struct inode *inode, int wait)
{
- int err;
+ int err = 0;
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
@@ -299,10 +298,18 @@ static int ubifs_write_inode(struct inode *inode, int wait)
return 0;
}
- dbg_gen("inode %lu", inode->i_ino);
- err = ubifs_jnl_write_inode(c, inode, 0);
- if (err)
- ubifs_err("can't write inode %lu, error %d", inode->i_ino, err);
+ /*
+ * As an optimization, do not write orphan inodes to the media,
+ * because this is simply not needed.
+ */
+ dbg_gen("inode %lu, mode %#x, nlink %u",
+ inode->i_ino, (int)inode->i_mode, inode->i_nlink);
+ if (inode->i_nlink) {
+ err = ubifs_jnl_write_inode(c, inode);
+ if (err)
+ ubifs_err("can't write inode %lu, error %d",
+ inode->i_ino, err);
+ }
ui->dirty = 0;
mutex_unlock(&ui->ui_mutex);
@@ -314,8 +321,9 @@ static void ubifs_delete_inode(struct inode *inode)
{
int err;
struct ubifs_info *c = inode->i_sb->s_fs_info;
+ struct ubifs_inode *ui = ubifs_inode(inode);
- if (ubifs_inode(inode)->xattr)
+ if (ui->xattr)
/*
* Extended attribute inode deletions are fully handled in
* 'ubifs_removexattr()'. These inodes are special and have
@@ -323,7 +331,7 @@ static void ubifs_delete_inode(struct inode *inode)
*/
goto out;
- dbg_gen("inode %lu", inode->i_ino);
+ dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode);
ubifs_assert(!atomic_read(&inode->i_count));
ubifs_assert(inode->i_nlink == 0);
@@ -331,15 +339,19 @@ static void ubifs_delete_inode(struct inode *inode)
if (is_bad_inode(inode))
goto out;
- ubifs_inode(inode)->ui_size = inode->i_size = 0;
- err = ubifs_jnl_write_inode(c, inode, 1);
+ ui->ui_size = inode->i_size = 0;
+ err = ubifs_jnl_delete_inode(c, inode);
if (err)
/*
* Worst case we have a lost orphan inode wasting space, so a
- * simple error message is ok here.
+ * simple error message is OK here.
*/
- ubifs_err("can't write inode %lu, error %d", inode->i_ino, err);
+ ubifs_err("can't delete inode %lu, error %d",
+ inode->i_ino, err);
+
out:
+ if (ui->dirty)
+ ubifs_release_dirty_inode_budget(c, ui);
clear_inode(inode);
}
@@ -1122,8 +1134,8 @@ static int mount_ubifs(struct ubifs_info *c)
if (err)
goto out_infos;
- ubifs_msg("mounted UBI device %d, volume %d", c->vi.ubi_num,
- c->vi.vol_id);
+ ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
+ c->vi.ubi_num, c->vi.vol_id, c->vi.name);
if (mounted_read_only)
ubifs_msg("mounted read-only");
x = (long long)c->main_lebs * c->leb_size;
@@ -1469,6 +1481,7 @@ static void ubifs_put_super(struct super_block *sb)
*/
ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0);
ubifs_assert(c->budg_idx_growth == 0);
+ ubifs_assert(c->budg_dd_growth == 0);
ubifs_assert(c->budg_data_growth == 0);
/*
@@ -1657,7 +1670,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
INIT_LIST_HEAD(&c->orph_new);
c->highest_inum = UBIFS_FIRST_INO;
- get_random_bytes(&c->vfs_gen, sizeof(int));
c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
ubi_get_volume_info(ubi, &c->vi);
@@ -1671,10 +1683,10 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
}
/*
- * UBIFS provids 'backing_dev_info' in order to disable readahead. For
+ * UBIFS provides 'backing_dev_info' in order to disable read-ahead. For
* UBIFS, I/O is not deferred, it is done immediately in readpage,
* which means the user would have to wait not just for their own I/O
- * but the readahead I/O as well i.e. completely pointless.
+ * but the read-ahead I/O as well, which is completely pointless.
*
* Read-ahead will be disabled because @c->bdi.ra_pages is 0.
*/
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index 8117e65ba2e9..8ac76b1c2d55 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -372,26 +372,25 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
- if (err == -ENOSPC) {
- if (!dbg_force_in_the_gaps_enabled) {
- /*
- * Do not print scary warnings if the
- * debugging option which forces
- * in-the-gaps is enabled.
- */
- ubifs_err("out of space");
- spin_lock(&c->space_lock);
- dbg_dump_budg(c);
- spin_unlock(&c->space_lock);
- dbg_dump_lprops(c);
- }
- /* Try to commit anyway */
- err = 0;
- break;
+ if (err != -ENOSPC) {
+ kfree(c->gap_lebs);
+ c->gap_lebs = NULL;
+ return err;
}
- kfree(c->gap_lebs);
- c->gap_lebs = NULL;
- return err;
+ if (!dbg_force_in_the_gaps_enabled) {
+ /*
+ * Do not print scary warnings if the debugging
+ * option which forces in-the-gaps is enabled.
+ */
+ ubifs_err("out of space");
+ spin_lock(&c->space_lock);
+ dbg_dump_budg(c);
+ spin_unlock(&c->space_lock);
+ dbg_dump_lprops(c);
+ }
+ /* Try to commit anyway */
+ err = 0;
+ break;
}
p++;
cnt -= written;
diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h
index 0cc7da9bed47..bd2121f3426e 100644
--- a/fs/ubifs/ubifs-media.h
+++ b/fs/ubifs/ubifs-media.h
@@ -228,10 +228,10 @@ enum {
/* Minimum number of orphan area logical eraseblocks */
#define UBIFS_MIN_ORPH_LEBS 1
/*
- * Minimum number of main area logical eraseblocks (buds, 2 for the index, 1
+ * Minimum number of main area logical eraseblocks (buds, 3 for the index, 1
* for GC, 1 for deletions, and at least 1 for committed data).
*/
-#define UBIFS_MIN_MAIN_LEBS (UBIFS_MIN_BUD_LEBS + 5)
+#define UBIFS_MIN_MAIN_LEBS (UBIFS_MIN_BUD_LEBS + 6)
/* Minimum number of logical eraseblocks */
#define UBIFS_MIN_LEB_CNT (UBIFS_SB_LEBS + UBIFS_MST_LEBS + \
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index e4f89f271827..d7f706f7a302 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -20,8 +20,6 @@
* Adrian Hunter
*/
-/* Implementation version 0.7 */
-
#ifndef __UBIFS_H__
#define __UBIFS_H__
@@ -322,6 +320,8 @@ struct ubifs_gced_idx_leb {
* struct ubifs_inode - UBIFS in-memory inode description.
* @vfs_inode: VFS inode description object
* @creat_sqnum: sequence number at time of creation
+ * @del_cmtno: commit number corresponding to the time the inode was deleted,
+ * protected by @c->commit_sem
* @xattr_size: summarized size of all extended attributes in bytes
* @xattr_cnt: count of extended attributes this inode has
* @xattr_names: sum of lengths of all extended attribute names belonging to
@@ -373,6 +373,7 @@ struct ubifs_gced_idx_leb {
struct ubifs_inode {
struct inode vfs_inode;
unsigned long long creat_sqnum;
+ unsigned long long del_cmtno;
unsigned int xattr_size;
unsigned int xattr_cnt;
unsigned int xattr_names;
@@ -779,7 +780,7 @@ struct ubifs_compressor {
/**
* struct ubifs_budget_req - budget requirements of an operation.
*
- * @fast: non-zero if the budgeting should try to aquire budget quickly and
+ * @fast: non-zero if the budgeting should try to acquire budget quickly and
* should not try to call write-back
* @recalculate: non-zero if @idx_growth, @data_growth, and @dd_growth fields
* have to be re-calculated
@@ -805,21 +806,31 @@ struct ubifs_compressor {
* An inode may contain 4KiB of data at max., thus the widths of @new_ino_d
* is 13 bits, and @dirtied_ino_d - 15, because up to 4 inodes may be made
* dirty by the re-name operation.
+ *
+ * Note, UBIFS aligns node lengths to an 8-byte boundary, so the requester has
+ * to make sure the amount of inode data which contributes to the @new_ino_d
+ * and @dirtied_ino_d fields is aligned.
*/
struct ubifs_budget_req {
unsigned int fast:1;
unsigned int recalculate:1;
+#ifndef UBIFS_DEBUG
unsigned int new_page:1;
unsigned int dirtied_page:1;
unsigned int new_dent:1;
unsigned int mod_dent:1;
unsigned int new_ino:1;
unsigned int new_ino_d:13;
-#ifndef UBIFS_DEBUG
unsigned int dirtied_ino:4;
unsigned int dirtied_ino_d:15;
#else
/* Not bit-fields to check for overflows */
+ unsigned int new_page;
+ unsigned int dirtied_page;
+ unsigned int new_dent;
+ unsigned int mod_dent;
+ unsigned int new_ino;
+ unsigned int new_ino_d;
unsigned int dirtied_ino;
unsigned int dirtied_ino_d;
#endif
@@ -860,13 +871,13 @@ struct ubifs_mount_opts {
* struct ubifs_info - UBIFS file-system description data structure
* (per-superblock).
* @vfs_sb: VFS @struct super_block object
- * @bdi: backing device info object to make VFS happy and disable readahead
+ * @bdi: backing device info object to make VFS happy and disable read-ahead
*
* @highest_inum: highest used inode number
- * @vfs_gen: VFS inode generation counter
* @max_sqnum: current global sequence number
- * @cmt_no: commit number (last successfully completed commit)
- * @cnt_lock: protects @highest_inum, @vfs_gen, and @max_sqnum counters
+ * @cmt_no: commit number of the last successfully completed commit, protected
+ * by @commit_sem
+ * @cnt_lock: protects @highest_inum and @max_sqnum counters
* @fmt_version: UBIFS on-flash format version
* @uuid: UUID from super block
*
@@ -1103,7 +1114,6 @@ struct ubifs_info {
struct backing_dev_info bdi;
ino_t highest_inum;
- unsigned int vfs_gen;
unsigned long long max_sqnum;
unsigned long long cmt_no;
spinlock_t cnt_lock;
@@ -1346,6 +1356,7 @@ extern struct backing_dev_info ubifs_backing_dev_info;
extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
/* io.c */
+void ubifs_ro_mode(struct ubifs_info *c, int err);
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len);
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
int dtype);
@@ -1399,8 +1410,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
int deletion, int xent);
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
const union ubifs_key *key, const void *buf, int len);
-int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode,
- int last_reference);
+int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode);
+int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode);
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
const struct dentry *old_dentry,
const struct inode *new_dir,
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 1388a078e1a9..649bec78b645 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -61,7 +61,7 @@
/*
* Limit the number of extended attributes per inode so that the total size
- * (xattr_size) is guaranteeded to fit in an 'unsigned int'.
+ * (@xattr_size) is guaranteed to fit in an 'unsigned int'.
*/
#define MAX_XATTRS_PER_INODE 65535
@@ -103,14 +103,14 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
struct inode *inode;
struct ubifs_inode *ui, *host_ui = ubifs_inode(host);
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
- .new_ino_d = size, .dirtied_ino = 1,
- .dirtied_ino_d = host_ui->data_len};
+ .new_ino_d = ALIGN(size, 8), .dirtied_ino = 1,
+ .dirtied_ino_d = ALIGN(host_ui->data_len, 8) };
if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE)
return -ENOSPC;
/*
* Linux limits the maximum size of the extended attribute names list
- * to %XATTR_LIST_MAX. This means we should not allow creating more*
+ * to %XATTR_LIST_MAX. This means we should not allow creating more
* extended attributes if the name list becomes larger. This limitation
* is artificial for UBIFS, though.
*/
@@ -128,7 +128,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
goto out_budg;
}
- mutex_lock(&host_ui->ui_mutex);
/* Re-define all operations to be "nothing" */
inode->i_mapping->a_ops = &none_address_operations;
inode->i_op = &none_inode_operations;
@@ -141,23 +140,19 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui->data = kmalloc(size, GFP_NOFS);
if (!ui->data) {
err = -ENOMEM;
- goto out_unlock;
+ goto out_free;
}
-
memcpy(ui->data, value, size);
+ inode->i_size = ui->ui_size = size;
+ ui->data_len = size;
+
+ mutex_lock(&host_ui->ui_mutex);
host->i_ctime = ubifs_current_time(host);
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
host_ui->xattr_names += nm->len;
- /*
- * We do not use i_size_write() because nobody can race with us as we
- * are holding host @host->i_mutex - every xattr operation for this
- * inode is serialized by it.
- */
- inode->i_size = ui->ui_size = size;
- ui->data_len = size;
err = ubifs_jnl_update(c, host, nm, inode, 0, 1);
if (err)
goto out_cancel;
@@ -172,8 +167,8 @@ out_cancel:
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
-out_unlock:
mutex_unlock(&host_ui->ui_mutex);
+out_free:
make_bad_inode(inode);
iput(inode);
out_budg:
@@ -200,29 +195,28 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
struct ubifs_inode *host_ui = ubifs_inode(host);
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_budget_req req = { .dirtied_ino = 2,
- .dirtied_ino_d = size + host_ui->data_len };
+ .dirtied_ino_d = ALIGN(size, 8) + ALIGN(host_ui->data_len, 8) };
ubifs_assert(ui->data_len == inode->i_size);
err = ubifs_budget_space(c, &req);
if (err)
return err;
- mutex_lock(&host_ui->ui_mutex);
- host->i_ctime = ubifs_current_time(host);
- host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
- host_ui->xattr_size += CALC_XATTR_BYTES(size);
-
kfree(ui->data);
ui->data = kmalloc(size, GFP_NOFS);
if (!ui->data) {
err = -ENOMEM;
- goto out_unlock;
+ goto out_free;
}
-
memcpy(ui->data, value, size);
inode->i_size = ui->ui_size = size;
ui->data_len = size;
+ mutex_lock(&host_ui->ui_mutex);
+ host->i_ctime = ubifs_current_time(host);
+ host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_size += CALC_XATTR_BYTES(size);
+
/*
* It is important to write the host inode after the xattr inode
* because if the host inode gets synchronized (via 'fsync()'), then
@@ -240,9 +234,9 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
- make_bad_inode(inode);
-out_unlock:
mutex_unlock(&host_ui->ui_mutex);
+ make_bad_inode(inode);
+out_free:
ubifs_release_budget(c, &req);
return err;
}
@@ -312,6 +306,7 @@ int ubifs_setxattr(struct dentry *dentry, const char *name,
dbg_gen("xattr '%s', host ino %lu ('%.*s'), size %zd", name,
host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+ ubifs_assert(mutex_is_locked(&host->i_mutex));
if (size > UBIFS_MAX_INO_DATA)
return -ERANGE;
@@ -384,7 +379,6 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
if (!xent)
return -ENOMEM;
- mutex_lock(&host->i_mutex);
xent_key_init(c, &key, host->i_ino, &nm);
err = ubifs_tnc_lookup_nm(c, &key, xent, &nm);
if (err) {
@@ -419,7 +413,6 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
out_iput:
iput(inode);
out_unlock:
- mutex_unlock(&host->i_mutex);
kfree(xent);
return err;
}
@@ -449,8 +442,6 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
return -ERANGE;
lowest_xent_key(c, &key, host->i_ino);
-
- mutex_lock(&host->i_mutex);
while (1) {
int type;
@@ -479,7 +470,6 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
pxent = xent;
key_read(c, &xent->key, &key);
}
- mutex_unlock(&host->i_mutex);
kfree(pxent);
if (err != -ENOENT) {
@@ -497,8 +487,8 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
int err;
struct ubifs_inode *host_ui = ubifs_inode(host);
struct ubifs_inode *ui = ubifs_inode(inode);
- struct ubifs_budget_req req = { .dirtied_ino = 1, .mod_dent = 1,
- .dirtied_ino_d = host_ui->data_len };
+ struct ubifs_budget_req req = { .dirtied_ino = 2, .mod_dent = 1,
+ .dirtied_ino_d = ALIGN(host_ui->data_len, 8) };
ubifs_assert(ui->data_len == inode->i_size);
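The xattr budgeting hunks above replace raw lengths such as 'size' and 'host_ui->data_len'
with 'ALIGN(size, 8)', matching the note added to 'struct ubifs_budget_req' that budgeted
inode data amounts must be 8-byte aligned. As a quick illustration of that rounding (a
stand-alone sketch; the macro name here is made up, but the round-up-to-a-power-of-two
arithmetic is the same idea as the kernel's ALIGN()):

#include <assert.h>
#include <stdio.h>

/* Round x up to the next multiple of the power-of-two a. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* Budgeted xattr data lengths are rounded up to 8 bytes. */
        assert(ALIGN_UP(0, 8) == 0);
        assert(ALIGN_UP(1, 8) == 8);
        assert(ALIGN_UP(13, 8) == 16);
        assert(ALIGN_UP(16, 8) == 16);
        printf("ALIGN_UP(13, 8) = %d\n", ALIGN_UP(13, 8));
        return 0;
}

So a 13-byte xattr value is budgeted as 16 bytes, never less than its aligned on-flash
footprint.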
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
deleted file mode 100644
index 3abe7e9ceb33..000000000000
--- a/fs/xfs/linux-2.6/sema.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_SEMA_H__
-#define __XFS_SUPPORT_SEMA_H__
-
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * sema_t structure just maps to struct semaphore in Linux kernel.
- */
-
-typedef struct semaphore sema_t;
-
-#define initnsema(sp, val, name) sema_init(sp, val)
-#define psema(sp, b) down(sp)
-#define vsema(sp) up(sp)
-#define freesema(sema) do { } while (0)
-
-static inline int issemalocked(sema_t *sp)
-{
- return down_trylock(sp) || (up(sp), 0);
-}
-
-/*
- * Map cpsema (try to get the sema) to down_trylock. We need to switch
- * the return values since cpsema returns 1 (acquired) 0 (failed) and
- * down_trylock returns the reverse 0 (acquired) 1 (failed).
- */
-static inline int cpsema(sema_t *sp)
-{
- return down_trylock(sp) ? 0 : 1;
-}
-
-#endif /* __XFS_SUPPORT_SEMA_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index fa47e43b8b41..f42f80a3b1fa 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -73,7 +73,6 @@ xfs_page_trace(
unsigned long pgoff)
{
xfs_inode_t *ip;
- bhv_vnode_t *vp = vn_from_inode(inode);
loff_t isize = i_size_read(inode);
loff_t offset = page_offset(page);
int delalloc = -1, unmapped = -1, unwritten = -1;
@@ -81,7 +80,7 @@ xfs_page_trace(
if (page_has_buffers(page))
xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- ip = xfs_vtoi(vp);
+ ip = XFS_I(inode);
if (!ip->i_rwtrace)
return;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 9cc8f0213095..986061ae1b9b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -58,7 +58,7 @@ xfs_buf_trace(
bp, id,
(void *)(unsigned long)bp->b_flags,
(void *)(unsigned long)bp->b_hold.counter,
- (void *)(unsigned long)bp->b_sema.count.counter,
+ (void *)(unsigned long)bp->b_sema.count,
(void *)current,
data, ra,
(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
@@ -253,7 +253,7 @@ _xfs_buf_initialize(
memset(bp, 0, sizeof(xfs_buf_t));
atomic_set(&bp->b_hold, 1);
- init_MUTEX_LOCKED(&bp->b_iodonesema);
+ init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_list);
INIT_LIST_HEAD(&bp->b_hash_list);
init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
@@ -838,6 +838,7 @@ xfs_buf_rele(
return;
}
+ ASSERT(atomic_read(&bp->b_hold) > 0);
if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
if (bp->b_relse) {
atomic_inc(&bp->b_hold);
@@ -851,11 +852,6 @@ xfs_buf_rele(
spin_unlock(&hash->bh_lock);
xfs_buf_free(bp);
}
- } else {
- /*
- * Catch reference count leaks
- */
- ASSERT(atomic_read(&bp->b_hold) >= 0);
}
}
@@ -1037,7 +1033,7 @@ xfs_buf_ioend(
xfs_buf_iodone_work(&bp->b_iodone_work);
}
} else {
- up(&bp->b_iodonesema);
+ complete(&bp->b_iowait);
}
}
@@ -1275,7 +1271,7 @@ xfs_buf_iowait(
XB_TRACE(bp, "iowait", 0);
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
- down(&bp->b_iodonesema);
+ wait_for_completion(&bp->b_iowait);
XB_TRACE(bp, "iowaited", (long)bp->b_error);
return bp->b_error;
}
@@ -1799,7 +1795,7 @@ int __init
xfs_buf_init(void)
{
#ifdef XFS_BUF_TRACE
- xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
+ xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
#endif
xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 29d1d4adc078..fe0109956656 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -157,7 +157,7 @@ typedef struct xfs_buf {
xfs_buf_iodone_t b_iodone; /* I/O completion function */
xfs_buf_relse_t b_relse; /* releasing function */
xfs_buf_bdstrat_t b_strat; /* pre-write function */
- struct semaphore b_iodonesema; /* Semaphore for I/O waiters */
+ struct completion b_iowait; /* queue for I/O waiters */
void *b_fspriv;
void *b_fspriv2;
void *b_fspriv3;
@@ -352,7 +352,7 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
-#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
+#define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait);
#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
#define XFS_BUF_TARGET(bp) ((bp)->b_target)
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 987fe84f7b13..24fd598af846 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -139,7 +139,7 @@ xfs_nfs_get_inode(
}
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return ip->i_vnode;
+ return VFS_I(ip);
}
STATIC struct dentry *
@@ -167,7 +167,7 @@ xfs_fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
if (!inode)
return NULL;
if (IS_ERR(inode))
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
result = d_alloc_anon(inode);
if (!result) {
iput(inode);
@@ -198,7 +198,7 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
if (!inode)
return NULL;
if (IS_ERR(inode))
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
result = d_alloc_anon(inode);
if (!result) {
iput(inode);
@@ -219,9 +219,9 @@ xfs_fs_get_parent(
if (unlikely(error))
return ERR_PTR(-error);
- parent = d_alloc_anon(cip->i_vnode);
+ parent = d_alloc_anon(VFS_I(cip));
if (unlikely(!parent)) {
- iput(cip->i_vnode);
+ iput(VFS_I(cip));
return ERR_PTR(-ENOMEM);
}
return parent;
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 1eefe61f0e10..36caa6d957df 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -31,7 +31,7 @@ xfs_tosspages(
xfs_off_t last,
int fiopt)
{
- struct address_space *mapping = ip->i_vnode->i_mapping;
+ struct address_space *mapping = VFS_I(ip)->i_mapping;
if (mapping->nrpages)
truncate_inode_pages(mapping, first);
@@ -44,7 +44,7 @@ xfs_flushinval_pages(
xfs_off_t last,
int fiopt)
{
- struct address_space *mapping = ip->i_vnode->i_mapping;
+ struct address_space *mapping = VFS_I(ip)->i_mapping;
int ret = 0;
if (mapping->nrpages) {
@@ -64,7 +64,7 @@ xfs_flush_pages(
uint64_t flags,
int fiopt)
{
- struct address_space *mapping = ip->i_vnode->i_mapping;
+ struct address_space *mapping = VFS_I(ip)->i_mapping;
int ret = 0;
int ret2;
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index acb978d9d085..48799ba7e3e6 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -245,7 +245,7 @@ xfs_vget_fsop_handlereq(
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- *inode = XFS_ITOV(ip);
+ *inode = VFS_I(ip);
return 0;
}
@@ -927,7 +927,7 @@ STATIC void
xfs_diflags_to_linux(
struct xfs_inode *ip)
{
- struct inode *inode = XFS_ITOV(ip);
+ struct inode *inode = VFS_I(ip);
unsigned int xflags = xfs_ip2xflags(ip);
if (xflags & XFS_XFLAG_IMMUTABLE)
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index e88f51028086..91bcd979242c 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -62,7 +62,7 @@ void
xfs_synchronize_atime(
xfs_inode_t *ip)
{
- struct inode *inode = ip->i_vnode;
+ struct inode *inode = VFS_I(ip);
if (inode) {
ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
@@ -79,7 +79,7 @@ void
xfs_mark_inode_dirty_sync(
xfs_inode_t *ip)
{
- struct inode *inode = ip->i_vnode;
+ struct inode *inode = VFS_I(ip);
if (inode)
mark_inode_dirty_sync(inode);
@@ -89,36 +89,31 @@ xfs_mark_inode_dirty_sync(
* Change the requested timestamp in the given inode.
* We don't lock across timestamp updates, and we don't log them but
* we do record the fact that there is dirty information in core.
- *
- * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG
- * with XFS_ICHGTIME_ACC to be sure that access time
- * update will take. Calling first with XFS_ICHGTIME_ACC
- * and then XFS_ICHGTIME_MOD may fail to modify the access
- * timestamp if the filesystem is mounted noacctm.
*/
void
xfs_ichgtime(
xfs_inode_t *ip,
int flags)
{
- struct inode *inode = vn_to_inode(XFS_ITOV(ip));
+ struct inode *inode = VFS_I(ip);
timespec_t tv;
+ int sync_it = 0;
+
+ tv = current_fs_time(inode->i_sb);
- nanotime(&tv);
- if (flags & XFS_ICHGTIME_MOD) {
+ if ((flags & XFS_ICHGTIME_MOD) &&
+ !timespec_equal(&inode->i_mtime, &tv)) {
inode->i_mtime = tv;
ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
+ sync_it = 1;
}
- if (flags & XFS_ICHGTIME_ACC) {
- inode->i_atime = tv;
- ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec;
- ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec;
- }
- if (flags & XFS_ICHGTIME_CHG) {
+ if ((flags & XFS_ICHGTIME_CHG) &&
+ !timespec_equal(&inode->i_ctime, &tv)) {
inode->i_ctime = tv;
ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec;
ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec;
+ sync_it = 1;
}
/*
@@ -130,55 +125,11 @@ xfs_ichgtime(
* ensure that the compiler does not reorder the update
* of i_update_core above the timestamp updates above.
*/
- SYNCHRONIZE();
- ip->i_update_core = 1;
- if (!(inode->i_state & I_NEW))
+ if (sync_it) {
+ SYNCHRONIZE();
+ ip->i_update_core = 1;
mark_inode_dirty_sync(inode);
-}
-
-/*
- * Variant on the above which avoids querying the system clock
- * in situations where we know the Linux inode timestamps have
- * just been updated (and so we can update our inode cheaply).
- */
-void
-xfs_ichgtime_fast(
- xfs_inode_t *ip,
- struct inode *inode,
- int flags)
-{
- timespec_t *tvp;
-
- /*
- * Atime updates for read() & friends are handled lazily now, and
- * explicit updates must go through xfs_ichgtime()
- */
- ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
-
- if (flags & XFS_ICHGTIME_MOD) {
- tvp = &inode->i_mtime;
- ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
- ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec;
}
- if (flags & XFS_ICHGTIME_CHG) {
- tvp = &inode->i_ctime;
- ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec;
- ip->i_d.di_ctime.t_nsec = (__int32_t)tvp->tv_nsec;
- }
-
- /*
- * We update the i_update_core field _after_ changing
- * the timestamps in order to coordinate properly with
- * xfs_iflush() so that we don't lose timestamp updates.
- * This keeps us from having to hold the inode lock
- * while doing this. We use the SYNCHRONIZE macro to
- * ensure that the compiler does not reorder the update
- * of i_update_core above the timestamp updates above.
- */
- SYNCHRONIZE();
- ip->i_update_core = 1;
- if (!(inode->i_state & I_NEW))
- mark_inode_dirty_sync(inode);
}
/*
@@ -299,7 +250,7 @@ xfs_vn_mknod(
if (unlikely(error))
goto out_free_acl;
- inode = ip->i_vnode;
+ inode = VFS_I(ip);
error = xfs_init_security(inode, dir);
if (unlikely(error))
@@ -366,7 +317,7 @@ xfs_vn_lookup(
return NULL;
}
- return d_splice_alias(cip->i_vnode, dentry);
+ return d_splice_alias(VFS_I(cip), dentry);
}
STATIC struct dentry *
@@ -399,12 +350,12 @@ xfs_vn_ci_lookup(
/* if exact match, just splice and exit */
if (!ci_name.name)
- return d_splice_alias(ip->i_vnode, dentry);
+ return d_splice_alias(VFS_I(ip), dentry);
/* else case-insensitive match... */
dname.name = ci_name.name;
dname.len = ci_name.len;
- dentry = d_add_ci(ip->i_vnode, dentry, &dname);
+ dentry = d_add_ci(VFS_I(ip), dentry, &dname);
kmem_free(ci_name.name);
return dentry;
}
@@ -478,7 +429,7 @@ xfs_vn_symlink(
if (unlikely(error))
goto out;
- inode = cip->i_vnode;
+ inode = VFS_I(cip);
error = xfs_init_security(inode, dir);
if (unlikely(error))
@@ -710,7 +661,7 @@ out_error:
return error;
}
-const struct inode_operations xfs_inode_operations = {
+static const struct inode_operations xfs_inode_operations = {
.permission = xfs_vn_permission,
.truncate = xfs_vn_truncate,
.getattr = xfs_vn_getattr,
@@ -722,7 +673,7 @@ const struct inode_operations xfs_inode_operations = {
.fallocate = xfs_vn_fallocate,
};
-const struct inode_operations xfs_dir_inode_operations = {
+static const struct inode_operations xfs_dir_inode_operations = {
.create = xfs_vn_create,
.lookup = xfs_vn_lookup,
.link = xfs_vn_link,
@@ -747,7 +698,7 @@ const struct inode_operations xfs_dir_inode_operations = {
.listxattr = xfs_vn_listxattr,
};
-const struct inode_operations xfs_dir_ci_inode_operations = {
+static const struct inode_operations xfs_dir_ci_inode_operations = {
.create = xfs_vn_create,
.lookup = xfs_vn_ci_lookup,
.link = xfs_vn_link,
@@ -772,7 +723,7 @@ const struct inode_operations xfs_dir_ci_inode_operations = {
.listxattr = xfs_vn_listxattr,
};
-const struct inode_operations xfs_symlink_inode_operations = {
+static const struct inode_operations xfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = xfs_vn_follow_link,
.put_link = xfs_vn_put_link,
@@ -784,3 +735,98 @@ const struct inode_operations xfs_symlink_inode_operations = {
.removexattr = generic_removexattr,
.listxattr = xfs_vn_listxattr,
};
+
+STATIC void
+xfs_diflags_to_iflags(
+ struct inode *inode,
+ struct xfs_inode *ip)
+{
+ if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+ inode->i_flags |= S_IMMUTABLE;
+ else
+ inode->i_flags &= ~S_IMMUTABLE;
+ if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+ inode->i_flags |= S_APPEND;
+ else
+ inode->i_flags &= ~S_APPEND;
+ if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
+ inode->i_flags |= S_SYNC;
+ else
+ inode->i_flags &= ~S_SYNC;
+ if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
+ inode->i_flags |= S_NOATIME;
+ else
+ inode->i_flags &= ~S_NOATIME;
+}
+
+/*
+ * Initialize the Linux inode, set up the operation vectors and
+ * unlock the inode.
+ *
+ * When reading existing inodes from disk this is called directly
+ * from xfs_iget, when creating a new inode it is called from
+ * xfs_ialloc after setting up the inode.
+ */
+void
+xfs_setup_inode(
+ struct xfs_inode *ip)
+{
+ struct inode *inode = ip->i_vnode;
+
+ inode->i_mode = ip->i_d.di_mode;
+ inode->i_nlink = ip->i_d.di_nlink;
+ inode->i_uid = ip->i_d.di_uid;
+ inode->i_gid = ip->i_d.di_gid;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFBLK:
+ case S_IFCHR:
+ inode->i_rdev =
+ MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
+ sysv_minor(ip->i_df.if_u2.if_rdev));
+ break;
+ default:
+ inode->i_rdev = 0;
+ break;
+ }
+
+ inode->i_generation = ip->i_d.di_gen;
+ i_size_write(inode, ip->i_d.di_size);
+ inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec;
+ inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
+ inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
+ inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
+ inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
+ inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
+ xfs_diflags_to_iflags(inode, ip);
+ xfs_iflags_clear(ip, XFS_IMODIFIED);
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_op = &xfs_inode_operations;
+ inode->i_fop = &xfs_file_operations;
+ inode->i_mapping->a_ops = &xfs_address_space_operations;
+ break;
+ case S_IFDIR:
+ if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
+ inode->i_op = &xfs_dir_ci_inode_operations;
+ else
+ inode->i_op = &xfs_dir_inode_operations;
+ inode->i_fop = &xfs_dir_file_operations;
+ break;
+ case S_IFLNK:
+ inode->i_op = &xfs_symlink_inode_operations;
+ if (!(ip->i_df.if_flags & XFS_IFINLINE))
+ inode->i_mapping->a_ops = &xfs_address_space_operations;
+ break;
+ default:
+ inode->i_op = &xfs_inode_operations;
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ break;
+ }
+
+ xfs_iflags_clear(ip, XFS_INEW);
+ barrier();
+
+ unlock_new_inode(inode);
+}
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index d97ba934a2ac..8b1a1e31dc21 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -18,10 +18,7 @@
#ifndef __XFS_IOPS_H__
#define __XFS_IOPS_H__
-extern const struct inode_operations xfs_inode_operations;
-extern const struct inode_operations xfs_dir_inode_operations;
-extern const struct inode_operations xfs_dir_ci_inode_operations;
-extern const struct inode_operations xfs_symlink_inode_operations;
+struct xfs_inode;
extern const struct file_operations xfs_file_operations;
extern const struct file_operations xfs_dir_file_operations;
@@ -29,14 +26,6 @@ extern const struct file_operations xfs_invis_file_operations;
extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
-struct xfs_inode;
-extern void xfs_ichgtime(struct xfs_inode *, int);
-extern void xfs_ichgtime_fast(struct xfs_inode *, struct inode *, int);
-
-#define xfs_vtoi(vp) \
- ((struct xfs_inode *)vn_to_inode(vp)->i_private)
-
-#define XFS_I(inode) \
- ((struct xfs_inode *)(inode)->i_private)
+extern void xfs_setup_inode(struct xfs_inode *);
#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 4d45d9351a6c..cc0f7b3a9795 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -45,13 +45,13 @@
#include <mrlock.h>
#include <sv.h>
#include <mutex.h>
-#include <sema.h>
#include <time.h>
#include <support/ktrace.h>
#include <support/debug.h>
#include <support/uuid.h>
+#include <linux/semaphore.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
@@ -126,8 +126,6 @@
#define current_cpu() (raw_smp_processor_id())
#define current_pid() (current->pid)
-#define current_fsuid(cred) (current->fsuid)
-#define current_fsgid(cred) (current->fsgid)
#define current_test_flags(f) (current->flags & (f))
#define current_set_flags_nested(sp, f) \
(*(sp) = current->flags, current->flags |= (f))
@@ -180,7 +178,7 @@
#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
#define xfs_stack_trace() dump_stack()
#define xfs_itruncate_data(ip, off) \
- (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off)))
+ (-vmtruncate(VFS_I(ip), (off)))
/* Move the kernel do_div definition off to one side */
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 82333b3e118e..1957e5357d04 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -137,7 +137,7 @@ xfs_iozero(
struct address_space *mapping;
int status;
- mapping = ip->i_vnode->i_mapping;
+ mapping = VFS_I(ip)->i_mapping;
do {
unsigned offset, bytes;
void *fsdata;
@@ -674,9 +674,7 @@ start:
*/
if (likely(!(ioflags & IO_INVIS) &&
!mnt_want_write(file->f_path.mnt))) {
- file_update_time(file);
- xfs_ichgtime_fast(xip, inode,
- XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
mnt_drop_write(file->f_path.mnt);
}
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 30ae96397e31..73c65f19e549 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -581,118 +581,6 @@ xfs_max_file_offset(
return (((__uint64_t)pagefactor) << bitshift) - 1;
}
-STATIC_INLINE void
-xfs_set_inodeops(
- struct inode *inode)
-{
- switch (inode->i_mode & S_IFMT) {
- case S_IFREG:
- inode->i_op = &xfs_inode_operations;
- inode->i_fop = &xfs_file_operations;
- inode->i_mapping->a_ops = &xfs_address_space_operations;
- break;
- case S_IFDIR:
- if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
- inode->i_op = &xfs_dir_ci_inode_operations;
- else
- inode->i_op = &xfs_dir_inode_operations;
- inode->i_fop = &xfs_dir_file_operations;
- break;
- case S_IFLNK:
- inode->i_op = &xfs_symlink_inode_operations;
- if (!(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE))
- inode->i_mapping->a_ops = &xfs_address_space_operations;
- break;
- default:
- inode->i_op = &xfs_inode_operations;
- init_special_inode(inode, inode->i_mode, inode->i_rdev);
- break;
- }
-}
-
-STATIC_INLINE void
-xfs_revalidate_inode(
- xfs_mount_t *mp,
- bhv_vnode_t *vp,
- xfs_inode_t *ip)
-{
- struct inode *inode = vn_to_inode(vp);
-
- inode->i_mode = ip->i_d.di_mode;
- inode->i_nlink = ip->i_d.di_nlink;
- inode->i_uid = ip->i_d.di_uid;
- inode->i_gid = ip->i_d.di_gid;
-
- switch (inode->i_mode & S_IFMT) {
- case S_IFBLK:
- case S_IFCHR:
- inode->i_rdev =
- MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
- sysv_minor(ip->i_df.if_u2.if_rdev));
- break;
- default:
- inode->i_rdev = 0;
- break;
- }
-
- inode->i_generation = ip->i_d.di_gen;
- i_size_write(inode, ip->i_d.di_size);
- inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec;
- inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
- inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
- inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
- inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
- inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
- if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
- inode->i_flags |= S_IMMUTABLE;
- else
- inode->i_flags &= ~S_IMMUTABLE;
- if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
- inode->i_flags |= S_APPEND;
- else
- inode->i_flags &= ~S_APPEND;
- if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
- inode->i_flags |= S_SYNC;
- else
- inode->i_flags &= ~S_SYNC;
- if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
- inode->i_flags |= S_NOATIME;
- else
- inode->i_flags &= ~S_NOATIME;
- xfs_iflags_clear(ip, XFS_IMODIFIED);
-}
-
-void
-xfs_initialize_vnode(
- struct xfs_mount *mp,
- bhv_vnode_t *vp,
- struct xfs_inode *ip)
-{
- struct inode *inode = vn_to_inode(vp);
-
- if (!ip->i_vnode) {
- ip->i_vnode = vp;
- inode->i_private = ip;
- }
-
- /*
- * We need to set the ops vectors, and unlock the inode, but if
- * we have been called during the new inode create process, it is
- * too early to fill in the Linux inode. We will get called a
- * second time once the inode is properly set up, and then we can
- * finish our work.
- */
- if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
- xfs_revalidate_inode(mp, vp, ip);
- xfs_set_inodeops(inode);
-
- xfs_iflags_clear(ip, XFS_INEW);
- barrier();
-
- unlock_new_inode(inode);
- }
-}
-
int
xfs_blkdev_get(
xfs_mount_t *mp,
@@ -982,26 +870,21 @@ STATIC struct inode *
xfs_fs_alloc_inode(
struct super_block *sb)
{
- bhv_vnode_t *vp;
-
- vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
- if (unlikely(!vp))
- return NULL;
- return vn_to_inode(vp);
+ return kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
}
STATIC void
xfs_fs_destroy_inode(
struct inode *inode)
{
- kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
+ kmem_zone_free(xfs_vnode_zone, inode);
}
STATIC void
xfs_fs_inode_init_once(
void *vnode)
{
- inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+ inode_init_once((struct inode *)vnode);
}
/*
@@ -1106,7 +989,7 @@ void
xfs_flush_inode(
xfs_inode_t *ip)
{
- struct inode *inode = ip->i_vnode;
+ struct inode *inode = VFS_I(ip);
igrab(inode);
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
@@ -1131,7 +1014,7 @@ void
xfs_flush_device(
xfs_inode_t *ip)
{
- struct inode *inode = vn_to_inode(XFS_ITOV(ip));
+ struct inode *inode = VFS_I(ip);
igrab(inode);
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
@@ -1201,6 +1084,15 @@ xfssyncd(
}
STATIC void
+xfs_free_fsname(
+ struct xfs_mount *mp)
+{
+ kfree(mp->m_fsname);
+ kfree(mp->m_rtname);
+ kfree(mp->m_logname);
+}
+
+STATIC void
xfs_fs_put_super(
struct super_block *sb)
{
@@ -1239,8 +1131,6 @@ xfs_fs_put_super(
error = xfs_unmount_flush(mp, 0);
WARN_ON(error);
- IRELE(rip);
-
/*
* If we're forcing a shutdown, typically because of a media error,
* we want to make sure we invalidate dirty pages that belong to
@@ -1257,10 +1147,12 @@ xfs_fs_put_super(
}
xfs_unmountfs(mp);
+ xfs_freesb(mp);
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
xfs_qmops_put(mp);
xfs_dmops_put(mp);
+ xfs_free_fsname(mp);
kfree(mp);
}
@@ -1517,6 +1409,8 @@ xfs_start_flags(
struct xfs_mount_args *ap,
struct xfs_mount *mp)
{
+ int error;
+
/* Values are in BBs */
if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
/*
@@ -1549,17 +1443,27 @@ xfs_start_flags(
ap->logbufsize);
return XFS_ERROR(EINVAL);
}
+
+ error = ENOMEM;
+
mp->m_logbsize = ap->logbufsize;
mp->m_fsname_len = strlen(ap->fsname) + 1;
- mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP);
- strcpy(mp->m_fsname, ap->fsname);
+
+ mp->m_fsname = kstrdup(ap->fsname, GFP_KERNEL);
+ if (!mp->m_fsname)
+ goto out;
+
if (ap->rtname[0]) {
- mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP);
- strcpy(mp->m_rtname, ap->rtname);
+ mp->m_rtname = kstrdup(ap->rtname, GFP_KERNEL);
+ if (!mp->m_rtname)
+ goto out_free_fsname;
+
}
+
if (ap->logname[0]) {
- mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP);
- strcpy(mp->m_logname, ap->logname);
+ mp->m_logname = kstrdup(ap->logname, GFP_KERNEL);
+ if (!mp->m_logname)
+ goto out_free_rtname;
}
if (ap->flags & XFSMNT_WSYNC)
@@ -1632,6 +1536,14 @@ xfs_start_flags(
if (ap->flags & XFSMNT_DMAPI)
mp->m_flags |= XFS_MOUNT_DMAPI;
return 0;
+
+
+ out_free_rtname:
+ kfree(mp->m_rtname);
+ out_free_fsname:
+ kfree(mp->m_fsname);
+ out:
+ return error;
}
/*
@@ -1792,10 +1704,10 @@ xfs_fs_fill_super(
*/
error = xfs_start_flags(args, mp);
if (error)
- goto out_destroy_counters;
+ goto out_free_fsname;
error = xfs_readsb(mp, flags);
if (error)
- goto out_destroy_counters;
+ goto out_free_fsname;
error = xfs_finish_flags(args, mp);
if (error)
goto out_free_sb;
@@ -1811,7 +1723,7 @@ xfs_fs_fill_super(
if (error)
goto out_free_sb;
- error = xfs_mountfs(mp, flags);
+ error = xfs_mountfs(mp);
if (error)
goto out_filestream_unmount;
@@ -1825,7 +1737,7 @@ xfs_fs_fill_super(
sb->s_time_gran = 1;
set_posix_acl_flag(sb);
- root = igrab(mp->m_rootip->i_vnode);
+ root = igrab(VFS_I(mp->m_rootip));
if (!root) {
error = ENOENT;
goto fail_unmount;
@@ -1857,7 +1769,8 @@ xfs_fs_fill_super(
xfs_filestream_unmount(mp);
out_free_sb:
xfs_freesb(mp);
- out_destroy_counters:
+ out_free_fsname:
+ xfs_free_fsname(mp);
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
out_put_qmops:
@@ -1890,10 +1803,8 @@ xfs_fs_fill_super(
error = xfs_unmount_flush(mp, 0);
WARN_ON(error);
- IRELE(mp->m_rootip);
-
xfs_unmountfs(mp);
- goto out_destroy_counters;
+ goto out_free_sb;
}
STATIC int
@@ -2014,7 +1925,7 @@ xfs_free_trace_bufs(void)
STATIC int __init
xfs_init_zones(void)
{
- xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
+ xfs_vnode_zone = kmem_zone_init_flags(sizeof(struct inode), "xfs_vnode",
KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
KM_ZONE_SPREAD,
xfs_fs_inode_init_once);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index b7d13da01bd6..fe2ef4e6a0f9 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -101,9 +101,6 @@ struct block_device;
extern __uint64_t xfs_max_file_offset(unsigned int);
-extern void xfs_initialize_vnode(struct xfs_mount *mp, bhv_vnode_t *vp,
- struct xfs_inode *ip);
-
extern void xfs_flush_inode(struct xfs_inode *);
extern void xfs_flush_device(struct xfs_inode *);
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 25488b6d9881..b52528bbbfff 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -33,7 +33,7 @@
/*
- * Dedicated vnode inactive/reclaim sync semaphores.
+ * Dedicated vnode inactive/reclaim sync wait queues.
* Prime number of hash buckets since address is used as the key.
*/
#define NVSYNC 37
@@ -82,24 +82,6 @@ vn_ioerror(
xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l);
}
-
-/*
- * Add a reference to a referenced vnode.
- */
-bhv_vnode_t *
-vn_hold(
- bhv_vnode_t *vp)
-{
- struct inode *inode;
-
- XFS_STATS_INC(vn_hold);
-
- inode = igrab(vn_to_inode(vp));
- ASSERT(inode);
-
- return vp;
-}
-
#ifdef XFS_INODE_TRACE
/*
@@ -108,7 +90,7 @@ vn_hold(
*/
static inline int xfs_icount(struct xfs_inode *ip)
{
- bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
+ struct inode *vp = VFS_I(ip);
if (vp)
return vn_count(vp);
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 41ca2cec5d31..683ce16210ff 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -22,20 +22,6 @@ struct file;
struct xfs_iomap;
struct attrlist_cursor_kern;
-typedef struct inode bhv_vnode_t;
-
-/*
- * Vnode to Linux inode mapping.
- */
-static inline bhv_vnode_t *vn_from_inode(struct inode *inode)
-{
- return inode;
-}
-static inline struct inode *vn_to_inode(bhv_vnode_t *vnode)
-{
- return vnode;
-}
-
/*
* Return values for xfs_inactive. A return value of
* VN_INACTIVE_NOCACHE implies that the file system behavior
@@ -76,57 +62,52 @@ extern void vn_iowait(struct xfs_inode *ip);
extern void vn_iowake(struct xfs_inode *ip);
extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l);
-static inline int vn_count(bhv_vnode_t *vp)
+static inline int vn_count(struct inode *vp)
{
- return atomic_read(&vn_to_inode(vp)->i_count);
+ return atomic_read(&vp->i_count);
}
-/*
- * Vnode reference counting functions (and macros for compatibility).
- */
-extern bhv_vnode_t *vn_hold(bhv_vnode_t *);
+#define IHOLD(ip) \
+do { \
+ ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
+ atomic_inc(&(VFS_I(ip)->i_count)); \
+ xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
+} while (0)
-#if defined(XFS_INODE_TRACE)
-#define VN_HOLD(vp) \
- ((void)vn_hold(vp), \
- xfs_itrace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address))
-#define VN_RELE(vp) \
- (xfs_itrace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \
- iput(vn_to_inode(vp)))
-#else
-#define VN_HOLD(vp) ((void)vn_hold(vp))
-#define VN_RELE(vp) (iput(vn_to_inode(vp)))
-#endif
+#define IRELE(ip) \
+do { \
+ xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
+ iput(VFS_I(ip)); \
+} while (0)
-static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp)
+static inline struct inode *vn_grab(struct inode *vp)
{
- struct inode *inode = igrab(vn_to_inode(vp));
- return inode ? vn_from_inode(inode) : NULL;
+ return igrab(vp);
}
/*
* Dealing with bad inodes
*/
-static inline int VN_BAD(bhv_vnode_t *vp)
+static inline int VN_BAD(struct inode *vp)
{
- return is_bad_inode(vn_to_inode(vp));
+ return is_bad_inode(vp);
}
/*
* Extracting atime values in various formats
*/
-static inline void vn_atime_to_bstime(bhv_vnode_t *vp, xfs_bstime_t *bs_atime)
+static inline void vn_atime_to_bstime(struct inode *vp, xfs_bstime_t *bs_atime)
{
bs_atime->tv_sec = vp->i_atime.tv_sec;
bs_atime->tv_nsec = vp->i_atime.tv_nsec;
}
-static inline void vn_atime_to_timespec(bhv_vnode_t *vp, struct timespec *ts)
+static inline void vn_atime_to_timespec(struct inode *vp, struct timespec *ts)
{
*ts = vp->i_atime;
}
-static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt)
+static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt)
{
*tt = vp->i_atime.tv_sec;
}
@@ -134,9 +115,9 @@ static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt)
/*
* Some useful predicates.
*/
-#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping)
-#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages)
-#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \
+#define VN_MAPPED(vp) mapping_mapped(vp->i_mapping)
+#define VN_CACHED(vp) (vp->i_mapping->nrpages)
+#define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \
PAGECACHE_TAG_DIRTY)
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index fc9f3fb39b7b..f2705f2fd43c 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -101,11 +101,18 @@ xfs_qm_dqinit(
if (brandnewdquot) {
dqp->dq_flnext = dqp->dq_flprev = dqp;
mutex_init(&dqp->q_qlock);
- initnsema(&dqp->q_flock, 1, "fdq");
sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
+ /*
+ * Because we want to use a counting completion, complete
+ * the flush completion once to allow a single access to
+ * the flush completion without blocking.
+ */
+ init_completion(&dqp->q_flush);
+ complete(&dqp->q_flush);
+
#ifdef XFS_DQUOT_TRACE
- dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
+ dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
xfs_dqtrace_entry(dqp, "DQINIT");
#endif
} else {
@@ -150,7 +157,6 @@ xfs_qm_dqdestroy(
ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));
mutex_destroy(&dqp->q_qlock);
- freesema(&dqp->q_flock);
sv_destroy(&dqp->q_pinwait);
#ifdef XFS_DQUOT_TRACE
@@ -431,7 +437,7 @@ xfs_qm_dqalloc(
* when it unlocks the inode. Since we want to keep the quota
* inode around, we bump the vnode ref count now.
*/
- VN_HOLD(XFS_ITOV(quotip));
+ IHOLD(quotip);
xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
nmaps = 1;
@@ -1211,7 +1217,7 @@ xfs_qm_dqflush(
int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
+ ASSERT(!completion_done(&dqp->q_flush));
xfs_dqtrace_entry(dqp, "DQFLUSH");
/*
@@ -1348,34 +1354,18 @@ xfs_qm_dqflush_done(
xfs_dqfunlock(dqp);
}
-
-int
-xfs_qm_dqflock_nowait(
- xfs_dquot_t *dqp)
-{
- int locked;
-
- locked = cpsema(&((dqp)->q_flock));
-
- /* XXX ifdef these out */
- if (locked)
- (dqp)->dq_flags |= XFS_DQ_FLOCKED;
- return (locked);
-}
-
-
int
xfs_qm_dqlock_nowait(
xfs_dquot_t *dqp)
{
- return (mutex_trylock(&((dqp)->q_qlock)));
+ return mutex_trylock(&dqp->q_qlock);
}
void
xfs_dqlock(
xfs_dquot_t *dqp)
{
- mutex_lock(&(dqp->q_qlock));
+ mutex_lock(&dqp->q_qlock);
}
void
@@ -1468,7 +1458,7 @@ xfs_qm_dqpurge(
* if we're turning off quotas. Basically, we need this flush
* lock, and are willing to block on it.
*/
- if (! xfs_qm_dqflock_nowait(dqp)) {
+ if (!xfs_dqflock_nowait(dqp)) {
/*
* Block on the flush lock after nudging dquot buffer,
* if it is incore.
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index f7393bba4e95..8958d0faf8d3 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -82,7 +82,7 @@ typedef struct xfs_dquot {
xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
mutex_t q_qlock; /* quota lock */
- sema_t q_flock; /* flush lock */
+ struct completion q_flush; /* flush completion queue */
uint q_pincount; /* pin count for this dquot */
sv_t q_pinwait; /* sync var for pinning */
#ifdef XFS_DQUOT_TRACE
@@ -113,17 +113,25 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
/*
- * The following three routines simply manage the q_flock
- * semaphore embedded in the dquot. This semaphore synchronizes
- * processes attempting to flush the in-core dquot back to disk.
+ * Manage the q_flush completion queue embedded in the dquot. This completion
+ * queue synchronizes processes attempting to flush the in-core dquot back to
+ * disk.
*/
-#define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\
- (dqp)->dq_flags |= XFS_DQ_FLOCKED; }
-#define xfs_dqfunlock(dqp) { ASSERT(issemalocked(&((dqp)->q_flock))); \
- vsema(&((dqp)->q_flock)); \
- (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); }
+static inline void xfs_dqflock(xfs_dquot_t *dqp)
+{
+ wait_for_completion(&dqp->q_flush);
+}
+
+static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
+{
+ return try_wait_for_completion(&dqp->q_flush);
+}
+
+static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
+{
+ complete(&dqp->q_flush);
+}
-#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock)))
#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
@@ -167,7 +175,6 @@ extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
extern int xfs_qm_dqpurge(xfs_dquot_t *);
extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
extern int xfs_qm_dqlock_nowait(xfs_dquot_t *);
-extern int xfs_qm_dqflock_nowait(xfs_dquot_t *);
extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
xfs_disk_dquot_t *);
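For readers unfamiliar with the trick in the xfs_dquot.h hunk above: a struct completion that is completed once at init time behaves like a one-token flush lock, which is why the old psema/vsema pair maps onto wait_for_completion()/complete(). The sketch below is a standalone illustration of that pattern, not part of the patch; the flush_lock type and helper names are made up, and it only compiles in a kernel build.

#include <linux/completion.h>

/* Hypothetical wrapper mirroring the q_flush usage in the hunks above. */
struct flush_lock {
	struct completion	done;
};

static void flush_lock_init(struct flush_lock *fl)
{
	/* Start "unlocked": complete once so exactly one token is available. */
	init_completion(&fl->done);
	complete(&fl->done);
}

static int flush_trylock(struct flush_lock *fl)		/* cf. xfs_dqflock_nowait() */
{
	/* Non-zero if we consumed the token, i.e. we now hold the flush lock. */
	return try_wait_for_completion(&fl->done);
}

static void flush_lock(struct flush_lock *fl)		/* cf. xfs_dqflock() */
{
	wait_for_completion(&fl->done);
}

static void flush_unlock(struct flush_lock *fl)		/* cf. xfs_dqfunlock() */
{
	complete(&fl->done);
}

With this layout, completion_done() answers "is the token still available?", which is why the old issemalocked() assertions become ASSERT(!completion_done(...)) in the dquot and inode hunks that follow.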
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 08d2fc89e6a1..f028644caa5e 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -151,7 +151,7 @@ xfs_qm_dquot_logitem_push(
dqp = logitem->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
+ ASSERT(!completion_done(&dqp->q_flush));
/*
* Since we were able to lock the dquot's flush lock and
@@ -245,7 +245,7 @@ xfs_qm_dquot_logitem_pushbuf(
* inode flush completed and the inode was taken off the AIL.
* So, just get out.
*/
- if (!issemalocked(&(dqp->q_flock)) ||
+ if (completion_done(&dqp->q_flush) ||
((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);
@@ -258,7 +258,7 @@ xfs_qm_dquot_logitem_pushbuf(
if (bp != NULL) {
if (XFS_BUF_ISDELAYWRITE(bp)) {
dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
- issemalocked(&(dqp->q_flock)));
+ !completion_done(&dqp->q_flush));
qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);
@@ -317,7 +317,7 @@ xfs_qm_dquot_logitem_trylock(
return (XFS_ITEM_LOCKED);
retval = XFS_ITEM_SUCCESS;
- if (! xfs_qm_dqflock_nowait(dqp)) {
+ if (!xfs_dqflock_nowait(dqp)) {
/*
* The dquot is already being flushed. It may have been
* flushed delayed write, however, and we don't want to
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 021934a3d456..df0ffef9775a 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -310,8 +310,7 @@ xfs_qm_unmount_quotadestroy(
*/
void
xfs_qm_mount_quotas(
- xfs_mount_t *mp,
- int mfsi_flags)
+ xfs_mount_t *mp)
{
int error = 0;
uint sbf;
@@ -346,8 +345,7 @@ xfs_qm_mount_quotas(
/*
* If any of the quotas are not consistent, do a quotacheck.
*/
- if (XFS_QM_NEED_QUOTACHECK(mp) &&
- !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
+ if (XFS_QM_NEED_QUOTACHECK(mp)) {
error = xfs_qm_quotacheck(mp);
if (error) {
/* Quotacheck failed and disabled quotas. */
@@ -484,7 +482,7 @@ again:
xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
/* XXX a sentinel would be better */
recl = XFS_QI_MPLRECLAIMS(mp);
- if (! xfs_qm_dqflock_nowait(dqp)) {
+ if (!xfs_dqflock_nowait(dqp)) {
/*
* If we can't grab the flush lock then check
* to see if the dquot has been flushed delayed
@@ -1062,7 +1060,7 @@ xfs_qm_sync(
/* XXX a sentinel would be better */
recl = XFS_QI_MPLRECLAIMS(mp);
- if (! xfs_qm_dqflock_nowait(dqp)) {
+ if (!xfs_dqflock_nowait(dqp)) {
if (nowait) {
xfs_dqunlock(dqp);
continue;
@@ -2079,7 +2077,7 @@ xfs_qm_shake_freelist(
* Try to grab the flush lock. If this dquot is in the process of
* getting flushed to disk, we don't want to reclaim it.
*/
- if (! xfs_qm_dqflock_nowait(dqp)) {
+ if (!xfs_dqflock_nowait(dqp)) {
xfs_dqunlock(dqp);
dqp = dqp->dq_flnext;
continue;
@@ -2257,7 +2255,7 @@ xfs_qm_dqreclaim_one(void)
* Try to grab the flush lock. If this dquot is in the process of
* getting flushed to disk, we don't want to reclaim it.
*/
- if (! xfs_qm_dqflock_nowait(dqp)) {
+ if (!xfs_dqflock_nowait(dqp)) {
xfs_dqunlock(dqp);
continue;
}
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index cd2300e374af..44f25349e478 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -165,7 +165,7 @@ typedef struct xfs_dquot_acct {
#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
-extern void xfs_qm_mount_quotas(xfs_mount_t *, int);
+extern void xfs_qm_mount_quotas(xfs_mount_t *);
extern int xfs_qm_quotacheck(xfs_mount_t *);
extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *);
extern int xfs_qm_unmount_quotas(xfs_mount_t *);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index f4f6c4c861d7..eea2e60b456b 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -162,7 +162,7 @@ xfs_qm_newmount(
* mounting, and get on with the boring life
* without disk quotas.
*/
- xfs_qm_mount_quotas(mp, 0);
+ xfs_qm_mount_quotas(mp);
} else {
/*
* Clear the quota flags, but remember them. This
@@ -184,13 +184,12 @@ STATIC int
xfs_qm_endmount(
xfs_mount_t *mp,
uint needquotamount,
- uint quotaflags,
- int mfsi_flags)
+ uint quotaflags)
{
if (needquotamount) {
ASSERT(mp->m_qflags == 0);
mp->m_qflags = quotaflags;
- xfs_qm_mount_quotas(mp, mfsi_flags);
+ xfs_qm_mount_quotas(mp);
}
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index adfb8723f65a..1a3b803dfa55 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -1034,7 +1034,7 @@ xfs_qm_dqrele_all_inodes(
{
xfs_inode_t *ip, *topino;
uint ireclaims;
- bhv_vnode_t *vp;
+ struct inode *vp;
boolean_t vnode_refd;
ASSERT(mp->m_quotainfo);
@@ -1059,7 +1059,7 @@ again:
ip = ip->i_mnext;
continue;
}
- vp = XFS_ITOV_NULL(ip);
+ vp = VFS_I(ip);
if (!vp) {
ASSERT(ip->i_udquot == NULL);
ASSERT(ip->i_gdquot == NULL);
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 3e4648ad9cfc..b2f639a1416f 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -37,15 +37,15 @@
#include <linux/capability.h>
#include <linux/posix_acl_xattr.h>
-STATIC int xfs_acl_setmode(bhv_vnode_t *, xfs_acl_t *, int *);
+STATIC int xfs_acl_setmode(struct inode *, xfs_acl_t *, int *);
STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *);
STATIC void xfs_acl_get_endian(xfs_acl_t *);
STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *);
STATIC int xfs_acl_invalid(xfs_acl_t *);
STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *);
-STATIC void xfs_acl_get_attr(bhv_vnode_t *, xfs_acl_t *, int, int, int *);
-STATIC void xfs_acl_set_attr(bhv_vnode_t *, xfs_acl_t *, int, int *);
-STATIC int xfs_acl_allow_set(bhv_vnode_t *, int);
+STATIC void xfs_acl_get_attr(struct inode *, xfs_acl_t *, int, int, int *);
+STATIC void xfs_acl_set_attr(struct inode *, xfs_acl_t *, int, int *);
+STATIC int xfs_acl_allow_set(struct inode *, int);
kmem_zone_t *xfs_acl_zone;
@@ -55,7 +55,7 @@ kmem_zone_t *xfs_acl_zone;
*/
int
xfs_acl_vhasacl_access(
- bhv_vnode_t *vp)
+ struct inode *vp)
{
int error;
@@ -68,7 +68,7 @@ xfs_acl_vhasacl_access(
*/
int
xfs_acl_vhasacl_default(
- bhv_vnode_t *vp)
+ struct inode *vp)
{
int error;
@@ -207,7 +207,7 @@ posix_acl_xfs_to_xattr(
int
xfs_acl_vget(
- bhv_vnode_t *vp,
+ struct inode *vp,
void *acl,
size_t size,
int kind)
@@ -217,7 +217,6 @@ xfs_acl_vget(
posix_acl_xattr_header *ext_acl = acl;
int flags = 0;
- VN_HOLD(vp);
if(size) {
if (!(_ACL_ALLOC(xfs_acl))) {
error = ENOMEM;
@@ -239,11 +238,10 @@ xfs_acl_vget(
goto out;
}
if (kind == _ACL_TYPE_ACCESS)
- xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl);
+ xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, xfs_acl);
error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
}
out:
- VN_RELE(vp);
if(xfs_acl)
_ACL_FREE(xfs_acl);
return -error;
@@ -251,28 +249,26 @@ out:
int
xfs_acl_vremove(
- bhv_vnode_t *vp,
+ struct inode *vp,
int kind)
{
int error;
- VN_HOLD(vp);
error = xfs_acl_allow_set(vp, kind);
if (!error) {
- error = xfs_attr_remove(xfs_vtoi(vp),
+ error = xfs_attr_remove(XFS_I(vp),
kind == _ACL_TYPE_DEFAULT?
SGI_ACL_DEFAULT: SGI_ACL_FILE,
ATTR_ROOT);
if (error == ENOATTR)
error = 0; /* 'scool */
}
- VN_RELE(vp);
return -error;
}
int
xfs_acl_vset(
- bhv_vnode_t *vp,
+ struct inode *vp,
void *acl,
size_t size,
int kind)
@@ -298,7 +294,6 @@ xfs_acl_vset(
return 0;
}
- VN_HOLD(vp);
error = xfs_acl_allow_set(vp, kind);
/* Incoming ACL exists, set file mode based on its value */
@@ -321,7 +316,6 @@ xfs_acl_vset(
}
out:
- VN_RELE(vp);
_ACL_FREE(xfs_acl);
return -error;
}
@@ -363,7 +357,7 @@ xfs_acl_iaccess(
STATIC int
xfs_acl_allow_set(
- bhv_vnode_t *vp,
+ struct inode *vp,
int kind)
{
if (vp->i_flags & (S_IMMUTABLE|S_APPEND))
@@ -372,7 +366,7 @@ xfs_acl_allow_set(
return ENOTDIR;
if (vp->i_sb->s_flags & MS_RDONLY)
return EROFS;
- if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
+ if (XFS_I(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
return EPERM;
return 0;
}
@@ -566,7 +560,7 @@ xfs_acl_get_endian(
*/
STATIC void
xfs_acl_get_attr(
- bhv_vnode_t *vp,
+ struct inode *vp,
xfs_acl_t *aclp,
int kind,
int flags,
@@ -576,7 +570,7 @@ xfs_acl_get_attr(
ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1);
flags |= ATTR_ROOT;
- *error = xfs_attr_get(xfs_vtoi(vp),
+ *error = xfs_attr_get(XFS_I(vp),
kind == _ACL_TYPE_ACCESS ?
SGI_ACL_FILE : SGI_ACL_DEFAULT,
(char *)aclp, &len, flags);
@@ -590,7 +584,7 @@ xfs_acl_get_attr(
*/
STATIC void
xfs_acl_set_attr(
- bhv_vnode_t *vp,
+ struct inode *vp,
xfs_acl_t *aclp,
int kind,
int *error)
@@ -615,7 +609,7 @@ xfs_acl_set_attr(
INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm);
}
INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
- *error = xfs_attr_set(xfs_vtoi(vp),
+ *error = xfs_attr_set(XFS_I(vp),
kind == _ACL_TYPE_ACCESS ?
SGI_ACL_FILE: SGI_ACL_DEFAULT,
(char *)newacl, len, ATTR_ROOT);
@@ -624,7 +618,7 @@ xfs_acl_set_attr(
int
xfs_acl_vtoacl(
- bhv_vnode_t *vp,
+ struct inode *vp,
xfs_acl_t *access_acl,
xfs_acl_t *default_acl)
{
@@ -639,7 +633,7 @@ xfs_acl_vtoacl(
if (error)
access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
else /* We have a good ACL and the file mode, synchronize. */
- xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl);
+ xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, access_acl);
}
if (default_acl) {
@@ -656,7 +650,7 @@ xfs_acl_vtoacl(
*/
int
xfs_acl_inherit(
- bhv_vnode_t *vp,
+ struct inode *vp,
mode_t mode,
xfs_acl_t *pdaclp)
{
@@ -715,7 +709,7 @@ out_error:
*/
STATIC int
xfs_acl_setmode(
- bhv_vnode_t *vp,
+ struct inode *vp,
xfs_acl_t *acl,
int *basicperms)
{
@@ -734,7 +728,7 @@ xfs_acl_setmode(
* mode. The m:: bits take precedence over the g:: bits.
*/
iattr.ia_valid = ATTR_MODE;
- iattr.ia_mode = xfs_vtoi(vp)->i_d.di_mode;
+ iattr.ia_mode = XFS_I(vp)->i_d.di_mode;
iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
ap = acl->acl_entry;
for (i = 0; i < acl->acl_cnt; ++i) {
@@ -764,7 +758,7 @@ xfs_acl_setmode(
if (gap && nomask)
iattr.ia_mode |= gap->ae_perm << 3;
- return xfs_setattr(xfs_vtoi(vp), &iattr, 0, sys_cred);
+ return xfs_setattr(XFS_I(vp), &iattr, 0, sys_cred);
}
/*
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 323ee94cf831..a4e293b93efa 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -59,14 +59,14 @@ extern struct kmem_zone *xfs_acl_zone;
(zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone)
-extern int xfs_acl_inherit(bhv_vnode_t *, mode_t mode, xfs_acl_t *);
+extern int xfs_acl_inherit(struct inode *, mode_t mode, xfs_acl_t *);
extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
-extern int xfs_acl_vtoacl(bhv_vnode_t *, xfs_acl_t *, xfs_acl_t *);
-extern int xfs_acl_vhasacl_access(bhv_vnode_t *);
-extern int xfs_acl_vhasacl_default(bhv_vnode_t *);
-extern int xfs_acl_vset(bhv_vnode_t *, void *, size_t, int);
-extern int xfs_acl_vget(bhv_vnode_t *, void *, size_t, int);
-extern int xfs_acl_vremove(bhv_vnode_t *, int);
+extern int xfs_acl_vtoacl(struct inode *, xfs_acl_t *, xfs_acl_t *);
+extern int xfs_acl_vhasacl_access(struct inode *);
+extern int xfs_acl_vhasacl_default(struct inode *);
+extern int xfs_acl_vset(struct inode *, void *, size_t, int);
+extern int xfs_acl_vget(struct inode *, void *, size_t, int);
+extern int xfs_acl_vremove(struct inode *, int);
#define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index f9472a2076d4..0b3b5efe848c 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -92,16 +92,6 @@
((__u8*)(pointer))[1] = (((value) ) & 0xff); \
}
-/* define generic INT_ macros */
-
-#define INT_GET(reference,arch) \
- (((arch) == ARCH_NOCONVERT) \
- ? \
- (reference) \
- : \
- INT_SWAP((reference),(reference)) \
- )
-
/* does not return a value */
#define INT_SET(reference,arch,valueref) \
(__builtin_constant_p(valueref) ? \
@@ -112,64 +102,6 @@
) \
)
-/* does not return a value */
-#define INT_MOD_EXPR(reference,arch,code) \
- (((arch) == ARCH_NOCONVERT) \
- ? \
- (void)((reference) code) \
- : \
- (void)( \
- (reference) = INT_GET((reference),arch) , \
- ((reference) code), \
- INT_SET(reference, arch, reference) \
- ) \
- )
-
-/* does not return a value */
-#define INT_MOD(reference,arch,delta) \
- (void)( \
- INT_MOD_EXPR(reference,arch,+=(delta)) \
- )
-
-/*
- * INT_COPY - copy a value between two locations with the
- * _same architecture_ but _potentially different sizes_
- *
- * if the types of the two parameters are equal or they are
- * in native architecture, a simple copy is done
- *
- * otherwise, architecture conversions are done
- *
- */
-
-/* does not return a value */
-#define INT_COPY(dst,src,arch) \
- ( \
- ((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
- ? \
- (void)((dst) = (src)) \
- : \
- INT_SET(dst, arch, INT_GET(src, arch)) \
- )
-
-/*
- * INT_XLATE - copy a value in either direction between two locations
- * with different architectures
- *
- * dir < 0 - copy from memory to buffer (native to arch)
- * dir > 0 - copy from buffer to memory (arch to native)
- */
-
-/* does not return a value */
-#define INT_XLATE(buf,mem,dir,arch) {\
- ASSERT(dir); \
- if (dir>0) { \
- (mem)=INT_GET(buf, arch); \
- } else { \
- INT_SET(buf, arch, mem); \
- } \
-}
-
/*
* In directories inode numbers are stored as unaligned arrays of unsigned
* 8bit integers on disk.
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 78de80e3caa2..f7cdc28aff41 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -194,6 +194,46 @@ xfs_attr_get(
return(error);
}
+/*
+ * Calculate how many blocks we need for the new attribute.
+ */
+int
+xfs_attr_calc_size(
+ struct xfs_inode *ip,
+ int namelen,
+ int valuelen,
+ int *local)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ int size;
+ int nblks;
+
+ /*
+ * Determine space new attribute will use, and if it would be
+ * "local" or "remote" (note: local != inline).
+ */
+ size = xfs_attr_leaf_newentsize(namelen, valuelen,
+ mp->m_sb.sb_blocksize, local);
+
+ nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
+ if (*local) {
+ if (size > (mp->m_sb.sb_blocksize >> 1)) {
+ /* Double split possible */
+ nblks *= 2;
+ }
+ } else {
+ /*
+ * Out of line attribute, cannot double split, but
+ * make room for the attribute value itself.
+ */
+ uint dblocks = XFS_B_TO_FSB(mp, valuelen);
+ nblks += dblocks;
+ nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
+ }
+
+ return nblks;
+}
+
STATIC int
xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
char *value, int valuelen, int flags)
@@ -202,10 +242,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
xfs_fsblock_t firstblock;
xfs_bmap_free_t flist;
int error, err2, committed;
- int local, size;
- uint nblks;
xfs_mount_t *mp = dp->i_mount;
int rsvd = (flags & ATTR_ROOT) != 0;
+ int local;
/*
* Attach the dquots to the inode.
@@ -241,30 +280,8 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
args.whichfork = XFS_ATTR_FORK;
args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
- /*
- * Determine space new attribute will use, and if it would be
- * "local" or "remote" (note: local != inline).
- */
- size = xfs_attr_leaf_newentsize(name->len, valuelen,
- mp->m_sb.sb_blocksize, &local);
-
- nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
- if (local) {
- if (size > (mp->m_sb.sb_blocksize >> 1)) {
- /* Double split possible */
- nblks <<= 1;
- }
- } else {
- uint dblocks = XFS_B_TO_FSB(mp, valuelen);
- /* Out of line attribute, cannot double split, but make
- * room for the attribute value itself.
- */
- nblks += dblocks;
- nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
- }
-
/* Size is now blocks for attribute data */
- args.total = nblks;
+ args.total = xfs_attr_calc_size(dp, name->len, valuelen, &local);
/*
* Start our first transaction of the day.
@@ -286,18 +303,17 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
if (rsvd)
args.trans->t_flags |= XFS_TRANS_RESERVE;
- if ((error = xfs_trans_reserve(args.trans, (uint) nblks,
- XFS_ATTRSET_LOG_RES(mp, nblks),
- 0, XFS_TRANS_PERM_LOG_RES,
- XFS_ATTRSET_LOG_COUNT))) {
+ if ((error = xfs_trans_reserve(args.trans, args.total,
+ XFS_ATTRSET_LOG_RES(mp, args.total), 0,
+ XFS_TRANS_PERM_LOG_RES, XFS_ATTRSET_LOG_COUNT))) {
xfs_trans_cancel(args.trans, 0);
return(error);
}
xfs_ilock(dp, XFS_ILOCK_EXCL);
- error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0,
- rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
- XFS_QMOPT_RES_REGBLKS);
+ error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, args.total, 0,
+ rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
+ XFS_QMOPT_RES_REGBLKS);
if (error) {
xfs_iunlock(dp, XFS_ILOCK_EXCL);
xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
@@ -384,7 +400,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
* Commit the leaf transformation. We'll need another (linked)
* transaction to add the new attribute to the leaf.
*/
- if ((error = xfs_attr_rolltrans(&args.trans, dp)))
+
+ error = xfs_trans_roll(&args.trans, dp);
+ if (error)
goto out;
}
@@ -964,7 +982,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit the current trans (including the inode) and start
* a new one.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
return (error);
/*
@@ -978,7 +997,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit the transaction that added the attr name so that
* later routines can manage their own transactions.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
return (error);
/*
@@ -1067,7 +1087,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
/*
* Commit the remove and start the next trans in series.
*/
- error = xfs_attr_rolltrans(&args->trans, dp);
+ error = xfs_trans_roll(&args->trans, dp);
} else if (args->rmtblkno > 0) {
/*
@@ -1298,7 +1318,8 @@ restart:
* Commit the node conversion and start the next
* trans in the chain.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
goto out;
goto restart;
@@ -1349,7 +1370,8 @@ restart:
* Commit the leaf addition or btree split and start the next
* trans in the chain.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
goto out;
/*
@@ -1449,7 +1471,8 @@ restart:
/*
* Commit and start the next trans in the chain.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
goto out;
} else if (args->rmtblkno > 0) {
@@ -1581,7 +1604,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
/*
* Commit the Btree join operation and start a new trans.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
goto out;
}
@@ -2082,7 +2106,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
/*
* Start the next trans in the chain.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
return (error);
}
@@ -2232,7 +2257,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
/*
* Close out trans and start the next one in the chain.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, args->dp)))
+ error = xfs_trans_roll(&args->trans, args->dp);
+ if (error)
return (error);
}
return(0);
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 8b2d31c19e4d..fb3b2a68b9b9 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -129,6 +129,7 @@ typedef struct xfs_attr_list_context {
/*
* Overall external interface routines.
*/
+int xfs_attr_calc_size(struct xfs_inode *, int, int, int *);
int xfs_attr_inactive(struct xfs_inode *dp);
int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 23ef5d7c87e1..79da6b2ea99e 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -2498,9 +2498,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
/*
* Commit the flag value change and start the next trans in series.
*/
- error = xfs_attr_rolltrans(&args->trans, args->dp);
-
- return(error);
+ return xfs_trans_roll(&args->trans, args->dp);
}
/*
@@ -2547,9 +2545,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
/*
* Commit the flag value change and start the next trans in series.
*/
- error = xfs_attr_rolltrans(&args->trans, args->dp);
-
- return(error);
+ return xfs_trans_roll(&args->trans, args->dp);
}
/*
@@ -2665,7 +2661,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
/*
* Commit the flag value change and start the next trans in series.
*/
- error = xfs_attr_rolltrans(&args->trans, args->dp);
+ error = xfs_trans_roll(&args->trans, args->dp);
return(error);
}
@@ -2723,7 +2719,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
/*
* Commit the invalidate and start the next transaction.
*/
- error = xfs_attr_rolltrans(trans, dp);
+ error = xfs_trans_roll(trans, dp);
return (error);
}
@@ -2825,7 +2821,8 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
/*
* Atomically commit the whole invalidate stuff.
*/
- if ((error = xfs_attr_rolltrans(trans, dp)))
+ error = xfs_trans_roll(trans, dp);
+ if (error)
return (error);
}
@@ -2964,7 +2961,8 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
/*
* Roll to next transaction.
*/
- if ((error = xfs_attr_rolltrans(trans, dp)))
+ error = xfs_trans_roll(trans, dp);
+ if (error)
return (error);
}
@@ -2974,60 +2972,3 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
return(0);
}
-
-
-/*
- * Roll from one trans in the sequence of PERMANENT transactions to the next.
- */
-int
-xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp)
-{
- xfs_trans_t *trans;
- unsigned int logres, count;
- int error;
-
- /*
- * Ensure that the inode is always logged.
- */
- trans = *transp;
- xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
-
- /*
- * Copy the critical parameters from one trans to the next.
- */
- logres = trans->t_log_res;
- count = trans->t_log_count;
- *transp = xfs_trans_dup(trans);
-
- /*
- * Commit the current transaction.
- * If this commit failed, then it'd just unlock those items that
- * are not marked ihold. That also means that a filesystem shutdown
- * is in progress. The caller takes the responsibility to cancel
- * the duplicate transaction that gets returned.
- */
- if ((error = xfs_trans_commit(trans, 0)))
- return (error);
-
- trans = *transp;
-
- /*
- * Reserve space in the log for the next transaction.
- * This also pushes items in the "AIL", the list of logged items,
- * out to disk if they are taking up space at the tail of the log
- * that we want to use. This requires that either nothing be locked
- * across this call, or that anything that is locked be logged in
- * the prior and the next transactions.
- */
- error = xfs_trans_reserve(trans, 0, logres, 0,
- XFS_TRANS_PERM_LOG_RES, count);
- /*
- * Ensure that the inode is in the new transaction and locked.
- */
- if (!error) {
- xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(trans, dp);
- }
- return (error);
-
-}
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 5ecf437b7825..83e9af417ca2 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -274,6 +274,4 @@ int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp,
struct xfs_dabuf *leaf2_bp);
int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize,
int *local);
-int xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp);
-
#endif /* __XFS_ATTR_LEAF_H__ */
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c
index fab0b6d5a41b..48228848f5ae 100644
--- a/fs/xfs/xfs_bit.c
+++ b/fs/xfs/xfs_bit.c
@@ -25,109 +25,6 @@
* XFS bit manipulation routines, used in non-realtime code.
*/
-#ifndef HAVE_ARCH_HIGHBIT
-/*
- * Index of high bit number in byte, -1 for none set, 0..7 otherwise.
- */
-static const char xfs_highbit[256] = {
- -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */
- 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */
- 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */
- 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */
- 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */
- 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */
- 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */
- 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 47 */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */
- 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */
- 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */
- 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */
- 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */
- 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */
- 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */
- 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */
- 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */
-};
-#endif
-
-/*
- * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set.
- */
-inline int
-xfs_highbit32(
- __uint32_t v)
-{
-#ifdef HAVE_ARCH_HIGHBIT
- return highbit32(v);
-#else
- int i;
-
- if (v & 0xffff0000)
- if (v & 0xff000000)
- i = 24;
- else
- i = 16;
- else if (v & 0x0000ffff)
- if (v & 0x0000ff00)
- i = 8;
- else
- i = 0;
- else
- return -1;
- return i + xfs_highbit[(v >> i) & 0xff];
-#endif
-}
-
-/*
- * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set.
- */
-int
-xfs_lowbit64(
- __uint64_t v)
-{
- __uint32_t w = (__uint32_t)v;
- int n = 0;
-
- if (w) { /* lower bits */
- n = ffs(w);
- } else { /* upper bits */
- w = (__uint32_t)(v >> 32);
- if (w && (n = ffs(w)))
- n += 32;
- }
- return n - 1;
-}
-
-/*
- * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set.
- */
-int
-xfs_highbit64(
- __uint64_t v)
-{
- __uint32_t h = (__uint32_t)(v >> 32);
-
- if (h)
- return xfs_highbit32(h) + 32;
- return xfs_highbit32((__uint32_t)v);
-}
-
-
/*
* Return whether bitmap is empty.
* Size is number of words in the bitmap, which is padded to word boundary
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h
index 082641a9782c..8e0e463dae2d 100644
--- a/fs/xfs/xfs_bit.h
+++ b/fs/xfs/xfs_bit.h
@@ -47,13 +47,39 @@ static inline __uint64_t xfs_mask64lo(int n)
}
/* Get high bit set out of 32-bit argument, -1 if none set */
-extern int xfs_highbit32(__uint32_t v);
+static inline int xfs_highbit32(__uint32_t v)
+{
+ return fls(v) - 1;
+}
+
+/* Get high bit set out of 64-bit argument, -1 if none set */
+static inline int xfs_highbit64(__uint64_t v)
+{
+ return fls64(v) - 1;
+}
+
+/* Get low bit set out of 32-bit argument, -1 if none set */
+static inline int xfs_lowbit32(__uint32_t v)
+{
+ unsigned long t = v;
+ return (v) ? find_first_bit(&t, 32) : -1;
+}
/* Get low bit set out of 64-bit argument, -1 if none set */
-extern int xfs_lowbit64(__uint64_t v);
+static inline int xfs_lowbit64(__uint64_t v)
+{
+ __uint32_t w = (__uint32_t)v;
+ int n = 0;
-/* Get high bit set out of 64-bit argument, -1 if none set */
-extern int xfs_highbit64(__uint64_t);
+ if (w) { /* lower bits */
+ n = ffs(w);
+ } else { /* upper bits */
+ w = (__uint32_t)(v >> 32);
+ if (w && (n = ffs(w)))
+ n += 32;
+ }
+ return n - 1;
+}
/* Return whether bitmap is empty (1 == empty) */
extern int xfs_bitmap_empty(uint *map, uint size);
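The table-driven helpers removed from xfs_bit.c are replaced above by the generic fls()/fls64()/find_first_bit() primitives. As a quick sanity check of the fls(v) - 1 identity, here is a small user-space sketch (not part of the patch); demo_fls() uses __builtin_clz as a stand-in for the kernel's fls().

#include <stdio.h>
#include <stdint.h>

/* User-space stand-in for the kernel's fls(): position of the highest
 * set bit counted from 1, or 0 when no bit is set. */
static int demo_fls(uint32_t v)
{
	return v ? 32 - __builtin_clz(v) : 0;
}

/* Same semantics as the new xfs_highbit32(): index of the highest set
 * bit, or -1 when the argument is zero. */
static int demo_highbit32(uint32_t v)
{
	return demo_fls(v) - 1;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       demo_highbit32(0),		/* -1 */
	       demo_highbit32(1),		/*  0 */
	       demo_highbit32(0x00008000u),	/* 15 */
	       demo_highbit32(0x80000000u));	/* 31 */
	return 0;
}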
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3c4beb3a4326..a1aab9275d5a 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -384,14 +384,14 @@ xfs_bmap_count_tree(
int levelin,
int *count);
-STATIC int
+STATIC void
xfs_bmap_count_leaves(
xfs_ifork_t *ifp,
xfs_extnum_t idx,
int numrecs,
int *count);
-STATIC int
+STATIC void
xfs_bmap_disk_count_leaves(
xfs_extnum_t idx,
xfs_bmbt_block_t *block,
@@ -4000,7 +4000,7 @@ xfs_bmap_add_attrfork(
ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
}
ASSERT(ip->i_d.di_anextents == 0);
- VN_HOLD(XFS_ITOV(ip));
+ IHOLD(ip);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
switch (ip->i_d.di_format) {
@@ -6096,7 +6096,7 @@ xfs_bmap_get_bp(
tp = cur->bc_tp;
licp = &tp->t_items;
while (!bp && licp != NULL) {
- if (XFS_LIC_ARE_ALL_FREE(licp)) {
+ if (xfs_lic_are_all_free(licp)) {
licp = licp->lic_next;
continue;
}
@@ -6106,11 +6106,11 @@ xfs_bmap_get_bp(
xfs_buf_log_item_t *bip;
xfs_buf_t *lbp;
- if (XFS_LIC_ISFREE(licp, i)) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
- lidp = XFS_LIC_SLOT(licp, i);
+ lidp = xfs_lic_slot(licp, i);
lip = lidp->lid_item;
if (lip->li_type != XFS_LI_BUF)
continue;
@@ -6367,13 +6367,9 @@ xfs_bmap_count_blocks(
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
- if (unlikely(xfs_bmap_count_leaves(ifp, 0,
+ xfs_bmap_count_leaves(ifp, 0,
ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
- count) < 0)) {
- XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
- XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
+ count);
return 0;
}
@@ -6454,13 +6450,7 @@ xfs_bmap_count_tree(
for (;;) {
nextbno = be64_to_cpu(block->bb_rightsib);
numrecs = be16_to_cpu(block->bb_numrecs);
- if (unlikely(xfs_bmap_disk_count_leaves(0,
- block, numrecs, count) < 0)) {
- xfs_trans_brelse(tp, bp);
- XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
- XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
+ xfs_bmap_disk_count_leaves(0, block, numrecs, count);
xfs_trans_brelse(tp, bp);
if (nextbno == NULLFSBLOCK)
break;
@@ -6478,7 +6468,7 @@ xfs_bmap_count_tree(
/*
* Count leaf blocks given a range of extent records.
*/
-STATIC int
+STATIC void
xfs_bmap_count_leaves(
xfs_ifork_t *ifp,
xfs_extnum_t idx,
@@ -6491,14 +6481,13 @@ xfs_bmap_count_leaves(
xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
*count += xfs_bmbt_get_blockcount(frp);
}
- return 0;
}
/*
* Count leaf blocks given a range of extent records originally
* in btree format.
*/
-STATIC int
+STATIC void
xfs_bmap_disk_count_leaves(
xfs_extnum_t idx,
xfs_bmbt_block_t *block,
@@ -6512,5 +6501,4 @@ xfs_bmap_disk_count_leaves(
frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b);
*count += xfs_bmbt_disk_get_blockcount(frp);
}
- return 0;
}
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index aeb87ca69fcc..cc593a84c345 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -46,38 +46,11 @@ kmem_zone_t *xfs_btree_cur_zone;
/*
* Btree magic numbers.
*/
-const __uint32_t xfs_magics[XFS_BTNUM_MAX] =
-{
+const __uint32_t xfs_magics[XFS_BTNUM_MAX] = {
XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
};
/*
- * Prototypes for internal routines.
- */
-
-/*
- * Checking routine: return maxrecs for the block.
- */
-STATIC int /* number of records fitting in block */
-xfs_btree_maxrecs(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_block_t *block);/* generic btree block pointer */
-
-/*
- * Internal routines.
- */
-
-/*
- * Retrieve the block pointer from the cursor at the given level.
- * This may be a bmap btree root or from a buffer.
- */
-STATIC xfs_btree_block_t * /* generic btree block pointer */
-xfs_btree_get_block(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level in btree */
- struct xfs_buf **bpp); /* buffer containing the block */
-
-/*
* Checking routine: return maxrecs for the block.
*/
STATIC int /* number of records fitting in block */
@@ -457,35 +430,6 @@ xfs_btree_dup_cursor(
}
/*
- * Change the cursor to point to the first record at the given level.
- * Other levels are unaffected.
- */
-int /* success=1, failure=0 */
-xfs_btree_firstrec(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level) /* level to change */
-{
- xfs_btree_block_t *block; /* generic btree block pointer */
- xfs_buf_t *bp; /* buffer containing block */
-
- /*
- * Get the block pointer for this level.
- */
- block = xfs_btree_get_block(cur, level, &bp);
- xfs_btree_check_block(cur, block, level, bp);
- /*
- * It's empty, there is no such record.
- */
- if (!block->bb_h.bb_numrecs)
- return 0;
- /*
- * Set the ptr value to 1, that's the first record/key.
- */
- cur->bc_ptrs[level] = 1;
- return 1;
-}
-
-/*
* Retrieve the block pointer from the cursor at the given level.
* This may be a bmap btree root or from a buffer.
*/
@@ -626,6 +570,13 @@ xfs_btree_init_cursor(
cur->bc_private.a.agbp = agbp;
cur->bc_private.a.agno = agno;
break;
+ case XFS_BTNUM_INO:
+ /*
+ * Inode allocation btree fields.
+ */
+ cur->bc_private.a.agbp = agbp;
+ cur->bc_private.a.agno = agno;
+ break;
case XFS_BTNUM_BMAP:
/*
* Bmap btree fields.
@@ -638,13 +589,6 @@ xfs_btree_init_cursor(
cur->bc_private.b.flags = 0;
cur->bc_private.b.whichfork = whichfork;
break;
- case XFS_BTNUM_INO:
- /*
- * Inode allocation btree fields.
- */
- cur->bc_private.i.agbp = agbp;
- cur->bc_private.i.agno = agno;
- break;
default:
ASSERT(0);
}
@@ -671,6 +615,35 @@ xfs_btree_islastblock(
}
/*
+ * Change the cursor to point to the first record at the given level.
+ * Other levels are unaffected.
+ */
+int /* success=1, failure=0 */
+xfs_btree_firstrec(
+ xfs_btree_cur_t *cur, /* btree cursor */
+ int level) /* level to change */
+{
+ xfs_btree_block_t *block; /* generic btree block pointer */
+ xfs_buf_t *bp; /* buffer containing block */
+
+ /*
+ * Get the block pointer for this level.
+ */
+ block = xfs_btree_get_block(cur, level, &bp);
+ xfs_btree_check_block(cur, block, level, bp);
+ /*
+ * It's empty, there is no such record.
+ */
+ if (!block->bb_h.bb_numrecs)
+ return 0;
+ /*
+ * Set the ptr value to 1, that's the first record/key.
+ */
+ cur->bc_ptrs[level] = 1;
+ return 1;
+}
+
+/*
* Change the cursor to point to the last record in the current block
* at the given level. Other levels are unaffected.
*/
@@ -890,12 +863,12 @@ xfs_btree_readahead_core(
case XFS_BTNUM_INO:
i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]);
if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
be32_to_cpu(i->bb_leftsib), 1);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
be32_to_cpu(i->bb_rightsib), 1);
rval++;
}
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 7440b78f9cec..1f528a2a3754 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -158,8 +158,8 @@ typedef struct xfs_btree_cur
__uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */
xfs_btnum_t bc_btnum; /* identifies which btree type */
union {
- struct { /* needed for BNO, CNT */
- struct xfs_buf *agbp; /* agf buffer pointer */
+ struct { /* needed for BNO, CNT, INO */
+ struct xfs_buf *agbp; /* agf/agi buffer pointer */
xfs_agnumber_t agno; /* ag number */
} a;
struct { /* needed for BMAP */
@@ -172,10 +172,6 @@ typedef struct xfs_btree_cur
char flags; /* flags */
#define XFS_BTCUR_BPRV_WASDEL 1 /* was delayed */
} b;
- struct { /* needed for INO */
- struct xfs_buf *agbp; /* agi buffer pointer */
- xfs_agnumber_t agno; /* ag number */
- } i;
} bc_private; /* per-btree type data */
} xfs_btree_cur_t;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index d86ca2c03a70..608c30c3f76b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -737,7 +737,7 @@ xfs_buf_item_init(
bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
bip->bli_format.blf_map_size = map_size;
#ifdef XFS_BLI_TRACE
- bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP);
+ bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_TRANS_DEBUG
@@ -1056,7 +1056,7 @@ xfs_buf_iodone_callbacks(
anyway. */
XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
XFS_BUF_DONE(bp);
- XFS_BUF_V_IODONESEMA(bp);
+ XFS_BUF_FINISH_IOWAIT(bp);
}
return;
}
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 2211e885ef24..760f4c5b5160 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -128,10 +128,8 @@ xfs_swap_extents(
xfs_swapext_t *sxp)
{
xfs_mount_t *mp;
- xfs_inode_t *ips[2];
xfs_trans_t *tp;
xfs_bstat_t *sbp = &sxp->sx_stat;
- bhv_vnode_t *vp, *tvp;
xfs_ifork_t *tempifp, *ifp, *tifp;
int ilf_fields, tilf_fields;
static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
@@ -150,19 +148,8 @@ xfs_swap_extents(
}
sbp = &sxp->sx_stat;
- vp = XFS_ITOV(ip);
- tvp = XFS_ITOV(tip);
-
- /* Lock in i_ino order */
- if (ip->i_ino < tip->i_ino) {
- ips[0] = ip;
- ips[1] = tip;
- } else {
- ips[0] = tip;
- ips[1] = ip;
- }
- xfs_lock_inodes(ips, 2, lock_flags);
+ xfs_lock_two_inodes(ip, tip, lock_flags);
locked = 1;
/* Verify that both files have the same format */
@@ -184,7 +171,7 @@ xfs_swap_extents(
goto error0;
}
- if (VN_CACHED(tvp) != 0) {
+ if (VN_CACHED(VFS_I(tip)) != 0) {
xfs_inval_cached_trace(tip, 0, -1, 0, -1);
error = xfs_flushinval_pages(tip, 0, -1,
FI_REMAPF_LOCKED);
@@ -193,7 +180,7 @@ xfs_swap_extents(
}
/* Verify O_DIRECT for ftmp */
- if (VN_CACHED(tvp) != 0) {
+ if (VN_CACHED(VFS_I(tip)) != 0) {
error = XFS_ERROR(EINVAL);
goto error0;
}
@@ -237,7 +224,7 @@ xfs_swap_extents(
* vop_read (or write in the case of autogrow) they block on the iolock
* until we have switched the extents.
*/
- if (VN_MAPPED(vp)) {
+ if (VN_MAPPED(VFS_I(ip))) {
error = XFS_ERROR(EBUSY);
goto error0;
}
@@ -265,7 +252,7 @@ xfs_swap_extents(
locked = 0;
goto error0;
}
- xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
/*
* Count the number of extended attribute blocks
@@ -350,15 +337,11 @@ xfs_swap_extents(
break;
}
- /*
- * Increment vnode ref counts since xfs_trans_commit &
- * xfs_trans_cancel will both unlock the inodes and
- * decrement the associated ref counts.
- */
- VN_HOLD(vp);
- VN_HOLD(tvp);
+ IHOLD(ip);
xfs_trans_ijoin(tp, ip, lock_flags);
+
+ IHOLD(tip);
xfs_trans_ijoin(tp, tip, lock_flags);
xfs_trans_log_inode(tp, ip, ilf_fields);
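The open-coded "lock in i_ino order" dance that xfs_swap_extents() used to perform is what the new xfs_lock_two_inodes() helper centralises. As a reminder of why the ordering matters, here is a minimal user-space sketch of the same deadlock-avoidance idea; the pthread-based demo_inode type and helper name are made up for illustration and are not XFS code.

#include <pthread.h>

struct demo_inode {
	unsigned long	i_ino;
	pthread_mutex_t	lock;
};

/* Always take the lock of the lower-numbered inode first, so two tasks
 * working on the same pair of files can never each hold one lock while
 * waiting for the other. */
static void demo_lock_two_inodes(struct demo_inode *a, struct demo_inode *b)
{
	struct demo_inode *first = a, *second = b;

	if (b->i_ino < a->i_ino) {
		first = b;
		second = a;
	}
	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
}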
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index f66756cfb5e8..f227ecd1a294 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -58,9 +58,6 @@ xfs_error_trap(int e)
}
return e;
}
-#endif
-
-#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
int xfs_etest[XFS_NUM_INJECT_ERROR];
int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
@@ -154,7 +151,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud)
return 0;
}
-#endif /* DEBUG || INDUCE_IO_ERROR */
+#endif /* DEBUG */
static void
xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap)
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index d8559d132efa..11543f10b0c6 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -125,22 +125,14 @@ extern void xfs_corruption_error(char *tag, int level, struct xfs_mount *mp,
#define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10)
#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
-#if (defined(DEBUG) || defined(INDUCE_IO_ERROR))
+#ifdef DEBUG
extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
#define XFS_NUM_INJECT_ERROR 10
-
-#ifdef __ANSI_CPP__
-#define XFS_TEST_ERROR(expr, mp, tag, rf) \
- ((expr) || \
- xfs_error_test((tag), (mp)->m_fixedfsid, #expr, __LINE__, __FILE__, \
- (rf)))
-#else
#define XFS_TEST_ERROR(expr, mp, tag, rf) \
((expr) || \
xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
(rf)))
-#endif /* __ANSI_CPP__ */
extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
@@ -148,7 +140,7 @@ extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
#define XFS_TEST_ERROR(expr, mp, tag, rf) (expr)
#define xfs_errortag_add(tag, mp) (ENOSYS)
#define xfs_errortag_clearall(mp, loud) (ENOSYS)
-#endif /* (DEBUG || INDUCE_IO_ERROR) */
+#endif /* DEBUG */
/*
* XFS panic tags -- allow a call to xfs_cmn_err() be turned into
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index c38fd14fca29..f3bb75da384e 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -400,7 +400,7 @@ xfs_filestream_init(void)
if (!item_zone)
return -ENOMEM;
#ifdef XFS_FILESTREAMS_TRACE
- xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP);
+ xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS);
#endif
return 0;
}
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index e5310c90e50f..83502f3edef0 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -181,7 +181,7 @@ xfs_inobt_delrec(
* then we can get rid of this level.
*/
if (numrecs == 1 && level > 0) {
- agbp = cur->bc_private.i.agbp;
+ agbp = cur->bc_private.a.agbp;
agi = XFS_BUF_TO_AGI(agbp);
/*
* pp is still set to the first pointer in the block.
@@ -194,7 +194,7 @@ xfs_inobt_delrec(
* Free the block.
*/
if ((error = xfs_free_extent(cur->bc_tp,
- XFS_AGB_TO_FSB(mp, cur->bc_private.i.agno, bno), 1)))
+ XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, bno), 1)))
return error;
xfs_trans_binval(cur->bc_tp, bp);
xfs_ialloc_log_agi(cur->bc_tp, agbp,
@@ -379,7 +379,7 @@ xfs_inobt_delrec(
rrecs = be16_to_cpu(right->bb_numrecs);
rbp = bp;
if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
- cur->bc_private.i.agno, lbno, 0, &lbp,
+ cur->bc_private.a.agno, lbno, 0, &lbp,
XFS_INO_BTREE_REF)))
return error;
left = XFS_BUF_TO_INOBT_BLOCK(lbp);
@@ -401,7 +401,7 @@ xfs_inobt_delrec(
lrecs = be16_to_cpu(left->bb_numrecs);
lbp = bp;
if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
- cur->bc_private.i.agno, rbno, 0, &rbp,
+ cur->bc_private.a.agno, rbno, 0, &rbp,
XFS_INO_BTREE_REF)))
return error;
right = XFS_BUF_TO_INOBT_BLOCK(rbp);
@@ -484,7 +484,7 @@ xfs_inobt_delrec(
xfs_buf_t *rrbp;
if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
- cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 0,
+ cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
&rrbp, XFS_INO_BTREE_REF)))
return error;
rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
@@ -497,7 +497,7 @@ xfs_inobt_delrec(
* Free the deleting block.
*/
if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp,
- cur->bc_private.i.agno, rbno), 1)))
+ cur->bc_private.a.agno, rbno), 1)))
return error;
xfs_trans_binval(cur->bc_tp, rbp);
/*
@@ -854,7 +854,7 @@ xfs_inobt_lookup(
{
xfs_agi_t *agi; /* a.g. inode header */
- agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp);
+ agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
agno = be32_to_cpu(agi->agi_seqno);
agbno = be32_to_cpu(agi->agi_root);
}
@@ -1089,7 +1089,7 @@ xfs_inobt_lshift(
* Set up the left neighbor as "left".
*/
if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.i.agno, be32_to_cpu(right->bb_leftsib),
+ cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib),
0, &lbp, XFS_INO_BTREE_REF)))
return error;
left = XFS_BUF_TO_INOBT_BLOCK(lbp);
@@ -1207,10 +1207,10 @@ xfs_inobt_newroot(
/*
* Get a block & a buffer.
*/
- agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp);
+ agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
- args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno,
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno,
be32_to_cpu(agi->agi_root));
args.mod = args.minleft = args.alignment = args.total = args.wasdel =
args.isfl = args.userdata = args.minalignslop = 0;
@@ -1233,7 +1233,7 @@ xfs_inobt_newroot(
*/
agi->agi_root = cpu_to_be32(args.agbno);
be32_add_cpu(&agi->agi_level, 1);
- xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp,
+ xfs_ialloc_log_agi(args.tp, cur->bc_private.a.agbp,
XFS_AGI_ROOT | XFS_AGI_LEVEL);
/*
* At the previous root level there are now two blocks: the old
@@ -1376,7 +1376,7 @@ xfs_inobt_rshift(
* Set up the right neighbor as "right".
*/
if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib),
+ cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib),
0, &rbp, XFS_INO_BTREE_REF)))
return error;
right = XFS_BUF_TO_INOBT_BLOCK(rbp);
@@ -1492,7 +1492,7 @@ xfs_inobt_split(
* Allocate the new block.
* If we can't do it, we're toast. Give up.
*/
- args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, lbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, lbno);
args.mod = args.minleft = args.alignment = args.total = args.wasdel =
args.isfl = args.userdata = args.minalignslop = 0;
args.minlen = args.maxlen = args.prod = 1;
@@ -1725,7 +1725,7 @@ xfs_inobt_decrement(
agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.i.agno, agbno, 0, &bp,
+ cur->bc_private.a.agno, agbno, 0, &bp,
XFS_INO_BTREE_REF)))
return error;
lev--;
@@ -1897,7 +1897,7 @@ xfs_inobt_increment(
agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.i.agno, agbno, 0, &bp,
+ cur->bc_private.a.agno, agbno, 0, &bp,
XFS_INO_BTREE_REF)))
return error;
lev--;
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index b07604b94d9f..e229e9e001c2 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -216,7 +216,14 @@ finish_inode:
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
init_waitqueue_head(&ip->i_ipin_wait);
atomic_set(&ip->i_pincount, 0);
- initnsema(&ip->i_flock, 1, "xfsfino");
+
+ /*
+ * Because we want to use a counting completion, complete
+ * the flush completion once to allow a single access to
+ * the flush completion without blocking.
+ */
+ init_completion(&ip->i_flush);
+ complete(&ip->i_flush);
if (lock_flags)
xfs_ilock(ip, lock_flags);
@@ -288,10 +295,17 @@ finish_inode:
*ipp = ip;
/*
+	 * Set up the Linux inode with the XFS inode.
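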
+ */
+ ip->i_vnode = inode;
+ inode->i_private = ip;
+
+ /*
* If we have a real type for an on-disk inode, we can set ops(&unlock)
* now. If it's a new inode being created, xfs_ialloc will handle it.
*/
- xfs_initialize_vnode(mp, inode, ip);
+ if (ip->i_d.di_mode != 0)
+ xfs_setup_inode(ip);
return 0;
}
@@ -411,10 +425,11 @@ xfs_iput(xfs_inode_t *ip,
* Special iput for brand-new inodes that are still locked
*/
void
-xfs_iput_new(xfs_inode_t *ip,
- uint lock_flags)
+xfs_iput_new(
+ xfs_inode_t *ip,
+ uint lock_flags)
{
- struct inode *inode = ip->i_vnode;
+ struct inode *inode = VFS_I(ip);
xfs_itrace_entry(ip);
@@ -775,26 +790,3 @@ xfs_isilocked(
}
#endif
-/*
- * The following three routines simply manage the i_flock
- * semaphore embedded in the inode. This semaphore synchronizes
- * processes attempting to flush the in-core inode back to disk.
- */
-void
-xfs_iflock(xfs_inode_t *ip)
-{
- psema(&(ip->i_flock), PINOD|PLTWAIT);
-}
-
-int
-xfs_iflock_nowait(xfs_inode_t *ip)
-{
- return (cpsema(&(ip->i_flock)));
-}
-
-void
-xfs_ifunlock(xfs_inode_t *ip)
-{
- ASSERT(issemalocked(&(ip->i_flock)));
- vsema(&(ip->i_flock));
-}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index bedc66163176..00e80df9dd9d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -580,8 +580,8 @@ xfs_iformat_extents(
xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
for (i = 0; i < nex; i++, dp++) {
xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
- ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
- ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
+ ep->l0 = get_unaligned_be64(&dp->l0);
+ ep->l1 = get_unaligned_be64(&dp->l1);
}
XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
if (whichfork != XFS_DATA_FORK ||
@@ -835,22 +835,22 @@ xfs_iread(
* Do this before xfs_iformat in case it adds entries.
*/
#ifdef XFS_INODE_TRACE
- ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP);
+ ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
- ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
+ ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMBT_TRACE
- ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
+ ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
- ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
+ ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
- ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
+ ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
- ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
+ ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif
/*
@@ -1046,9 +1046,9 @@ xfs_ialloc(
{
xfs_ino_t ino;
xfs_inode_t *ip;
- bhv_vnode_t *vp;
uint flags;
int error;
+ timespec_t tv;
/*
* Call the space management code to pick
@@ -1077,13 +1077,12 @@ xfs_ialloc(
}
ASSERT(ip != NULL);
- vp = XFS_ITOV(ip);
ip->i_d.di_mode = (__uint16_t)mode;
ip->i_d.di_onlink = 0;
ip->i_d.di_nlink = nlink;
ASSERT(ip->i_d.di_nlink == nlink);
- ip->i_d.di_uid = current_fsuid(cr);
- ip->i_d.di_gid = current_fsgid(cr);
+ ip->i_d.di_uid = current_fsuid();
+ ip->i_d.di_gid = current_fsgid();
ip->i_d.di_projid = prid;
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
@@ -1130,7 +1129,13 @@ xfs_ialloc(
ip->i_size = 0;
ip->i_d.di_nextents = 0;
ASSERT(ip->i_d.di_nblocks == 0);
- xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
+
+ nanotime(&tv);
+ ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
+ ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
+ ip->i_d.di_atime = ip->i_d.di_mtime;
+ ip->i_d.di_ctime = ip->i_d.di_mtime;
+
/*
* di_gen will have been taken care of in xfs_iread.
*/
@@ -1220,7 +1225,7 @@ xfs_ialloc(
xfs_trans_log_inode(tp, ip, flags);
/* now that we have an i_mode we can setup inode ops and unlock */
- xfs_initialize_vnode(tp->t_mountp, vp, ip);
+ xfs_setup_inode(ip);
*ipp = ip;
return 0;
@@ -1399,7 +1404,6 @@ xfs_itruncate_start(
xfs_fsize_t last_byte;
xfs_off_t toss_start;
xfs_mount_t *mp;
- bhv_vnode_t *vp;
int error = 0;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
@@ -1408,7 +1412,6 @@ xfs_itruncate_start(
(flags == XFS_ITRUNC_MAYBE));
mp = ip->i_mount;
- vp = XFS_ITOV(ip);
/* wait for the completion of any pending DIOs */
if (new_size < ip->i_size)
@@ -1457,7 +1460,7 @@ xfs_itruncate_start(
#ifdef DEBUG
if (new_size == 0) {
- ASSERT(VN_CACHED(vp) == 0);
+ ASSERT(VN_CACHED(VFS_I(ip)) == 0);
}
#endif
return error;
@@ -2630,7 +2633,6 @@ xfs_idestroy(
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
mrfree(&ip->i_lock);
mrfree(&ip->i_iolock);
- freesema(&ip->i_flock);
#ifdef XFS_INODE_TRACE
ktrace_free(ip->i_trace);
@@ -3048,10 +3050,10 @@ cluster_corrupt_out:
/*
* xfs_iflush() will write a modified inode's changes out to the
* inode's on disk home. The caller must have the inode lock held
- * in at least shared mode and the inode flush semaphore must be
- * held as well. The inode lock will still be held upon return from
+ * in at least shared mode and the inode flush completion must be
+ * active as well. The inode lock will still be held upon return from
* the call and the caller is free to unlock it.
- * The inode flush lock will be unlocked when the inode reaches the disk.
+ * The inode flush will be completed when the inode reaches the disk.
* The flags indicate how the inode's buffer should be written out.
*/
int
@@ -3070,7 +3072,7 @@ xfs_iflush(
XFS_STATS_INC(xs_iflush_count);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
- ASSERT(issemalocked(&(ip->i_flock)));
+ ASSERT(!completion_done(&ip->i_flush));
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > ip->i_df.if_ext_max);
@@ -3233,7 +3235,7 @@ xfs_iflush_int(
#endif
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
- ASSERT(issemalocked(&(ip->i_flock)));
+ ASSERT(!completion_done(&ip->i_flush));
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > ip->i_df.if_ext_max);
@@ -3465,7 +3467,6 @@ xfs_iflush_all(
xfs_mount_t *mp)
{
xfs_inode_t *ip;
- bhv_vnode_t *vp;
again:
XFS_MOUNT_ILOCK(mp);
@@ -3480,14 +3481,13 @@ xfs_iflush_all(
continue;
}
- vp = XFS_ITOV_NULL(ip);
- if (!vp) {
+ if (!VFS_I(ip)) {
XFS_MOUNT_IUNLOCK(mp);
xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
goto again;
}
- ASSERT(vn_count(vp) == 0);
+ ASSERT(vn_count(VFS_I(ip)) == 0);
ip = ip->i_mnext;
} while (ip != mp->m_inodes);
@@ -3707,7 +3707,7 @@ xfs_iext_add_indirect_multi(
* (all extents past */
if (nex2) {
byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
- nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
+ nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
erp->er_extcount -= nex2;
xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
@@ -4007,8 +4007,7 @@ xfs_iext_realloc_direct(
ifp->if_u1.if_extents =
kmem_realloc(ifp->if_u1.if_extents,
rnew_size,
- ifp->if_real_bytes,
- KM_SLEEP);
+ ifp->if_real_bytes, KM_NOFS);
}
if (rnew_size > ifp->if_real_bytes) {
memset(&ifp->if_u1.if_extents[ifp->if_bytes /
@@ -4067,7 +4066,7 @@ xfs_iext_inline_to_direct(
xfs_ifork_t *ifp, /* inode fork pointer */
int new_size) /* number of extents in file */
{
- ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP);
+ ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
memset(ifp->if_u1.if_extents, 0, new_size);
if (ifp->if_bytes) {
memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
@@ -4099,7 +4098,7 @@ xfs_iext_realloc_indirect(
} else {
ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
kmem_realloc(ifp->if_u1.if_ext_irec,
- new_size, size, KM_SLEEP);
+ new_size, size, KM_NOFS);
}
}
@@ -4341,11 +4340,10 @@ xfs_iext_irec_init(
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
ASSERT(nextents <= XFS_LINEAR_EXTS);
- erp = (xfs_ext_irec_t *)
- kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
+ erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
if (nextents == 0) {
- ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
+ ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
} else if (!ifp->if_real_bytes) {
xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
@@ -4393,7 +4391,7 @@ xfs_iext_irec_new(
/* Initialize new extent record */
erp = ifp->if_u1.if_ext_irec;
- erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
+ erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
erp[erp_idx].er_extcount = 0;
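The rewritten xfs_iflush() header comment earlier in this file describes the caller contract: hold the inode lock at least shared, own the flush completion, and let I/O completion finish the flush. The following is a hypothetical caller-side sketch of that contract, not part of the patch; it assumes the xfs_iflock_nowait()/xfs_ifunlock() helpers introduced in xfs_inode.h below and uses the existing xfs_iflush()/XFS_IFLUSH_ASYNC interfaces seen elsewhere in this diff.

/*
 * Illustrative only: one way a caller honours the xfs_iflush() contract
 * after this patch.  The flush "lock" is now a completion, and
 * xfs_ifunlock() runs from the buffer I/O completion path once the
 * inode reaches the disk.
 */
STATIC int
xfs_example_flush_inode(
	struct xfs_inode	*ip)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);	/* at least shared, per the comment */
	if (xfs_iflock_nowait(ip)) {
		/*
		 * We now own the flush; xfs_iflush() arranges for
		 * xfs_ifunlock() (complete(&ip->i_flush)) to run when the
		 * inode reaches the disk.
		 */
		error = xfs_iflush(ip, XFS_IFLUSH_ASYNC);
	}
	/* if the trylock failed, another thread is already flushing */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);	/* caller may unlock as usual */
	return error;
}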
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 17a04b6321ed..1420c49674d7 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -87,8 +87,7 @@ typedef struct xfs_ifork {
* Flags for xfs_ichgtime().
*/
#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */
-#define XFS_ICHGTIME_ACC 0x2 /* data fork access timestamp */
-#define XFS_ICHGTIME_CHG 0x4 /* inode field change timestamp */
+#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */
/*
* Per-fork incore inode flags.
@@ -204,7 +203,7 @@ typedef struct xfs_inode {
struct xfs_inode *i_mprev; /* ptr to prev inode */
struct xfs_mount *i_mount; /* fs mount struct ptr */
struct list_head i_reclaim; /* reclaim list */
- bhv_vnode_t *i_vnode; /* vnode backpointer */
+ struct inode *i_vnode; /* vnode backpointer */
struct xfs_dquot *i_udquot; /* user dquot */
struct xfs_dquot *i_gdquot; /* group dquot */
@@ -223,7 +222,7 @@ typedef struct xfs_inode {
struct xfs_inode_log_item *i_itemp; /* logging information */
mrlock_t i_lock; /* inode lock */
mrlock_t i_iolock; /* inode IO lock */
- sema_t i_flock; /* inode flush lock */
+ struct completion i_flush; /* inode flush completion q */
atomic_t i_pincount; /* inode pin count */
wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */
spinlock_t i_flags_lock; /* inode i_flags lock */
@@ -263,6 +262,18 @@ typedef struct xfs_inode {
#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \
(ip)->i_size : (ip)->i_d.di_size;
+/* Convert from vfs inode to xfs inode */
+static inline struct xfs_inode *XFS_I(struct inode *inode)
+{
+ return (struct xfs_inode *)inode->i_private;
+}
+
+/* convert from xfs inode to vfs inode */
+static inline struct inode *VFS_I(struct xfs_inode *ip)
+{
+ return (struct inode *)ip->i_vnode;
+}
+
/*
* i_flags helper functions
*/
@@ -439,9 +450,6 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
#define XFS_ITRUNC_DEFINITE 0x1
#define XFS_ITRUNC_MAYBE 0x2
-#define XFS_ITOV(ip) ((ip)->i_vnode)
-#define XFS_ITOV_NULL(ip) ((ip)->i_vnode)
-
/*
* For multiple groups support: if S_ISGID bit is set in the parent
* directory, group of new file is set to that of the parent, and
@@ -473,11 +481,8 @@ int xfs_ilock_nowait(xfs_inode_t *, uint);
void xfs_iunlock(xfs_inode_t *, uint);
void xfs_ilock_demote(xfs_inode_t *, uint);
int xfs_isilocked(xfs_inode_t *, uint);
-void xfs_iflock(xfs_inode_t *);
-int xfs_iflock_nowait(xfs_inode_t *);
uint xfs_ilock_map_shared(xfs_inode_t *);
void xfs_iunlock_map_shared(xfs_inode_t *, uint);
-void xfs_ifunlock(xfs_inode_t *);
void xfs_ireclaim(xfs_inode_t *);
int xfs_finish_reclaim(xfs_inode_t *, int, int);
int xfs_finish_reclaim_all(struct xfs_mount *, int);
@@ -522,6 +527,7 @@ void xfs_iflush_all(struct xfs_mount *);
void xfs_ichgtime(xfs_inode_t *, int);
xfs_fsize_t xfs_file_last_byte(xfs_inode_t *);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
+void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
void xfs_synchronize_atime(xfs_inode_t *);
void xfs_mark_inode_dirty_sync(xfs_inode_t *);
@@ -570,6 +576,26 @@ extern struct kmem_zone *xfs_ifork_zone;
extern struct kmem_zone *xfs_inode_zone;
extern struct kmem_zone *xfs_ili_zone;
+/*
+ * Manage the i_flush queue embedded in the inode. This completion
+ * queue synchronizes processes attempting to flush the in-core
+ * inode back to disk.
+ */
+static inline void xfs_iflock(xfs_inode_t *ip)
+{
+ wait_for_completion(&ip->i_flush);
+}
+
+static inline int xfs_iflock_nowait(xfs_inode_t *ip)
+{
+ return try_wait_for_completion(&ip->i_flush);
+}
+
+static inline void xfs_ifunlock(xfs_inode_t *ip)
+{
+ complete(&ip->i_flush);
+}
+
#endif /* __KERNEL__ */
#endif /* __XFS_INODE_H__ */
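For readers less familiar with completions, the i_flush helpers above amount to using a struct completion as a single-holder flush lock. Below is a self-contained sketch of the same pattern using only the generic kernel completion API; the names (flush_gate, etc.) are illustrative and not part of this patch. The key assumption is that the completion starts out completed, so the first xfs_iflock()-style wait succeeds.

#include <linux/completion.h>

struct flush_gate {
	struct completion	done;
};

static inline void flush_gate_init(struct flush_gate *fg)
{
	init_completion(&fg->done);
	complete(&fg->done);			/* start out "unlocked" */
}

static inline void flush_gate_lock(struct flush_gate *fg)
{
	wait_for_completion(&fg->done);		/* consume the completion: now held */
}

static inline int flush_gate_trylock(struct flush_gate *fg)
{
	return try_wait_for_completion(&fg->done);
}

static inline void flush_gate_unlock(struct flush_gate *fg)
{
	complete(&fg->done);			/* hand the gate to the next waiter */
}

/* completion_done() is true when the gate is free, i.e. nobody holds it */
static inline int flush_gate_is_locked(struct flush_gate *fg)
{
	return !completion_done(&fg->done);
}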
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 0eee08a32c26..97c7452e2620 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -779,11 +779,10 @@ xfs_inode_item_pushbuf(
ASSERT(iip->ili_push_owner == current_pid());
/*
- * If flushlock isn't locked anymore, chances are that the
- * inode flush completed and the inode was taken off the AIL.
- * So, just get out.
+ * If a flush is not in progress anymore, chances are that the
+ * inode was taken off the AIL. So, just get out.
*/
- if (!issemalocked(&(ip->i_flock)) ||
+ if (completion_done(&ip->i_flush) ||
((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -805,7 +804,7 @@ xfs_inode_item_pushbuf(
* If not, we can flush it async.
*/
dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
- issemalocked(&(ip->i_flock)));
+ !completion_done(&ip->i_flush));
iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
xfs_buftrace("INODE ITEM PUSH", bp);
@@ -858,7 +857,7 @@ xfs_inode_item_push(
ip = iip->ili_inode;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
- ASSERT(issemalocked(&(ip->i_flock)));
+ ASSERT(!completion_done(&ip->i_flush));
/*
* Since we were able to lock the inode's flush lock and
* we found it on the AIL, the inode must be dirty. This
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 9a3ef9dcaeb9..cf6754a3c5b3 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -59,7 +59,6 @@ xfs_bulkstat_one_iget(
{
xfs_icdinode_t *dic; /* dinode core info pointer */
xfs_inode_t *ip; /* incore inode pointer */
- bhv_vnode_t *vp;
int error;
error = xfs_iget(mp, NULL, ino,
@@ -72,7 +71,6 @@ xfs_bulkstat_one_iget(
ASSERT(ip != NULL);
ASSERT(ip->i_blkno != (xfs_daddr_t)0);
- vp = XFS_ITOV(ip);
dic = &ip->i_d;
/* xfs_iget returns the following without needing
@@ -85,7 +83,7 @@ xfs_bulkstat_one_iget(
buf->bs_uid = dic->di_uid;
buf->bs_gid = dic->di_gid;
buf->bs_size = dic->di_size;
- vn_atime_to_bstime(vp, &buf->bs_atime);
+ vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime);
buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 91b00a5686cd..ccba14eb9dbe 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -160,7 +160,7 @@ void
xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
{
if (!iclog->ic_trace)
- iclog->ic_trace = ktrace_alloc(256, KM_SLEEP);
+ iclog->ic_trace = ktrace_alloc(256, KM_NOFS);
ktrace_enter(iclog->ic_trace,
(void *)((unsigned long)state),
(void *)((unsigned long)current_pid()),
@@ -336,15 +336,12 @@ xfs_log_done(xfs_mount_t *mp,
} else {
xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
xlog_regrant_reserve_log_space(log, ticket);
- }
-
- /* If this ticket was a permanent reservation and we aren't
- * trying to release it, reset the inited flags; so next time
- * we write, a start record will be written out.
- */
- if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) &&
- (flags & XFS_LOG_REL_PERM_RESERV) == 0)
+ /* If this ticket was a permanent reservation and we aren't
+ * trying to release it, reset the inited flags; so next time
+ * we write, a start record will be written out.
+ */
ticket->t_flags |= XLOG_TIC_INITED;
+ }
return lsn;
} /* xfs_log_done */
@@ -357,11 +354,11 @@ xfs_log_done(xfs_mount_t *mp,
* Asynchronous forces are implemented by setting the WANT_SYNC
* bit in the appropriate in-core log and then returning.
*
- * Synchronous forces are implemented with a semaphore. All callers
- * to force a given lsn to disk will wait on a semaphore attached to the
+ * Synchronous forces are implemented with a signal variable. All callers
+ * to force a given lsn to disk will wait on the sv attached to the
* specific in-core log. When given in-core log finally completes its
* write to disk, that thread will wake up all threads waiting on the
- * semaphore.
+ * sv.
*/
int
_xfs_log_force(
@@ -588,12 +585,12 @@ error:
* mp - ubiquitous xfs mount point structure
*/
int
-xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags)
+xfs_log_mount_finish(xfs_mount_t *mp)
{
int error;
if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
- error = xlog_recover_finish(mp->m_log, mfsi_flags);
+ error = xlog_recover_finish(mp->m_log);
else {
error = 0;
ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
@@ -707,7 +704,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
iclog->ic_state == XLOG_STATE_DIRTY)) {
if (!XLOG_FORCED_SHUTDOWN(log)) {
- sv_wait(&iclog->ic_forcesema, PMEM,
+ sv_wait(&iclog->ic_force_wait, PMEM,
&log->l_icloglock, s);
} else {
spin_unlock(&log->l_icloglock);
@@ -748,7 +745,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
|| iclog->ic_state == XLOG_STATE_DIRTY
|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
- sv_wait(&iclog->ic_forcesema, PMEM,
+ sv_wait(&iclog->ic_force_wait, PMEM,
&log->l_icloglock, s);
} else {
spin_unlock(&log->l_icloglock);
@@ -838,7 +835,7 @@ xfs_log_move_tail(xfs_mount_t *mp,
break;
tail_lsn = 0;
free_bytes -= tic->t_unit_res;
- sv_signal(&tic->t_sema);
+ sv_signal(&tic->t_wait);
tic = tic->t_next;
} while (tic != log->l_write_headq);
}
@@ -859,7 +856,7 @@ xfs_log_move_tail(xfs_mount_t *mp,
break;
tail_lsn = 0;
free_bytes -= need_bytes;
- sv_signal(&tic->t_sema);
+ sv_signal(&tic->t_wait);
tic = tic->t_next;
} while (tic != log->l_reserve_headq);
}
@@ -1285,8 +1282,8 @@ xlog_alloc_log(xfs_mount_t *mp,
ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
- sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force");
- sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write");
+ sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
+ sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
iclogp = &iclog->ic_next;
}
@@ -1565,8 +1562,8 @@ xlog_dealloc_log(xlog_t *log)
iclog = log->l_iclog;
for (i=0; i<log->l_iclog_bufs; i++) {
- sv_destroy(&iclog->ic_forcesema);
- sv_destroy(&iclog->ic_writesema);
+ sv_destroy(&iclog->ic_force_wait);
+ sv_destroy(&iclog->ic_write_wait);
xfs_buf_free(iclog->ic_bp);
#ifdef XFS_LOG_TRACE
if (iclog->ic_trace != NULL) {
@@ -1976,7 +1973,7 @@ xlog_write(xfs_mount_t * mp,
/* Clean iclogs starting from the head. This ordering must be
* maintained, so an iclog doesn't become ACTIVE beyond one that
* is SYNCING. This is also required to maintain the notion that we use
- * a counting semaphore to hold off would be writers to the log when every
+ * an ordered wait queue to hold off would-be writers to the log when every
* iclog is trying to sync to disk.
*
* State Change: DIRTY -> ACTIVE
@@ -2240,7 +2237,7 @@ xlog_state_do_callback(
xlog_state_clean_log(log);
/* wake up threads waiting in xfs_log_force() */
- sv_broadcast(&iclog->ic_forcesema);
+ sv_broadcast(&iclog->ic_force_wait);
iclog = iclog->ic_next;
} while (first_iclog != iclog);
@@ -2302,8 +2299,7 @@ xlog_state_do_callback(
* the second completion goes through.
*
* Callbacks could take time, so they are done outside the scope of the
- * global state machine log lock. Assume that the calls to cvsema won't
- * take a long time. At least we know it won't sleep.
+ * global state machine log lock.
*/
STATIC void
xlog_state_done_syncing(
@@ -2339,7 +2335,7 @@ xlog_state_done_syncing(
* iclog buffer, we wake them all, one will get to do the
* I/O, the others get to wait for the result.
*/
- sv_broadcast(&iclog->ic_writesema);
+ sv_broadcast(&iclog->ic_write_wait);
spin_unlock(&log->l_icloglock);
xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
} /* xlog_state_done_syncing */
@@ -2347,11 +2343,9 @@ xlog_state_done_syncing(
/*
* If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
- * sleep. The flush semaphore is set to the number of in-core buffers and
- * decremented around disk syncing. Therefore, if all buffers are syncing,
- * this semaphore will cause new writes to sleep until a sync completes.
- * Otherwise, this code just does p() followed by v(). This approximates
- * a sleep/wakeup except we can't race.
+ * sleep. We wait on the flush queue on the head iclog as that should be
+ * the first iclog to complete flushing. Hence if all iclogs are syncing,
+ * we will wait here and all new writes will sleep until a sync completes.
*
* The in-core logs are used in a circular fashion. They are not used
* out-of-order even when an iclog past the head is free.
@@ -2508,7 +2502,7 @@ xlog_grant_log_space(xlog_t *log,
goto error_return;
XFS_STATS_INC(xs_sleep_logspace);
- sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+ sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
/*
* If we got an error, and the filesystem is shutting down,
* we'll catch it down below. So just continue...
@@ -2534,7 +2528,7 @@ redo:
xlog_trace_loggrant(log, tic,
"xlog_grant_log_space: sleep 2");
XFS_STATS_INC(xs_sleep_logspace);
- sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+ sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
if (XLOG_FORCED_SHUTDOWN(log)) {
spin_lock(&log->l_grant_lock);
@@ -2633,7 +2627,7 @@ xlog_regrant_write_log_space(xlog_t *log,
if (free_bytes < ntic->t_unit_res)
break;
free_bytes -= ntic->t_unit_res;
- sv_signal(&ntic->t_sema);
+ sv_signal(&ntic->t_wait);
ntic = ntic->t_next;
} while (ntic != log->l_write_headq);
@@ -2644,7 +2638,7 @@ xlog_regrant_write_log_space(xlog_t *log,
xlog_trace_loggrant(log, tic,
"xlog_regrant_write_log_space: sleep 1");
XFS_STATS_INC(xs_sleep_logspace);
- sv_wait(&tic->t_sema, PINOD|PLTWAIT,
+ sv_wait(&tic->t_wait, PINOD|PLTWAIT,
&log->l_grant_lock, s);
/* If we're shutting down, this tic is already
@@ -2673,7 +2667,7 @@ redo:
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
xlog_ins_ticketq(&log->l_write_headq, tic);
XFS_STATS_INC(xs_sleep_logspace);
- sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
+ sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
/* If we're shutting down, this tic is already off the queue */
if (XLOG_FORCED_SHUTDOWN(log)) {
@@ -2916,7 +2910,7 @@ xlog_state_switch_iclogs(xlog_t *log,
* 2. the current iclog is dirty, and the previous iclog is in the
* active or dirty state.
*
- * We may sleep (call psema) if:
+ * We may sleep if:
*
* 1. the current iclog is not in the active nor dirty state.
* 2. the current iclog is dirty, and the previous iclog is not in the
@@ -3013,7 +3007,7 @@ maybe_sleep:
return XFS_ERROR(EIO);
}
XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s);
+ sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
/*
* No need to grab the log lock here since we're
* only deciding whether or not to return EIO
@@ -3096,7 +3090,7 @@ try_again:
XLOG_STATE_SYNCING))) {
ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_prev->ic_writesema, PSWP,
+ sv_wait(&iclog->ic_prev->ic_write_wait, PSWP,
&log->l_icloglock, s);
*log_flushed = 1;
already_slept = 1;
@@ -3116,7 +3110,7 @@ try_again:
!(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
/*
- * Don't wait on the forcesema if we know that we've
+ * Don't wait on completion if we know that we've
* gotten a log write error.
*/
if (iclog->ic_state & XLOG_STATE_IOERROR) {
@@ -3124,7 +3118,7 @@ try_again:
return XFS_ERROR(EIO);
}
XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s);
+ sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
/*
* No need to grab the log lock here since we're
* only deciding whether or not to return EIO
@@ -3180,7 +3174,7 @@ STATIC void
xlog_ticket_put(xlog_t *log,
xlog_ticket_t *ticket)
{
- sv_destroy(&ticket->t_sema);
+ sv_destroy(&ticket->t_wait);
kmem_zone_free(xfs_log_ticket_zone, ticket);
} /* xlog_ticket_put */
@@ -3270,7 +3264,7 @@ xlog_ticket_get(xlog_t *log,
tic->t_trans_type = 0;
if (xflags & XFS_LOG_PERM_RESERV)
tic->t_flags |= XLOG_TIC_PERM_RESERV;
- sv_init(&(tic->t_sema), SV_DEFAULT, "logtick");
+ sv_init(&(tic->t_wait), SV_DEFAULT, "logtick");
xlog_tic_reset_res(tic);
@@ -3557,14 +3551,14 @@ xfs_log_force_umount(
*/
if ((tic = log->l_reserve_headq)) {
do {
- sv_signal(&tic->t_sema);
+ sv_signal(&tic->t_wait);
tic = tic->t_next;
} while (tic != log->l_reserve_headq);
}
if ((tic = log->l_write_headq)) {
do {
- sv_signal(&tic->t_sema);
+ sv_signal(&tic->t_wait);
tic = tic->t_next;
} while (tic != log->l_write_headq);
}
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index d1d678ecb63e..d47b91f10822 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -149,7 +149,7 @@ int xfs_log_mount(struct xfs_mount *mp,
struct xfs_buftarg *log_target,
xfs_daddr_t start_block,
int num_bblocks);
-int xfs_log_mount_finish(struct xfs_mount *mp, int);
+int xfs_log_mount_finish(struct xfs_mount *mp);
void xfs_log_move_tail(struct xfs_mount *mp,
xfs_lsn_t tail_lsn);
int xfs_log_notify(struct xfs_mount *mp,
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 6245913196b4..c8a5b22ee3e3 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -241,7 +241,7 @@ typedef struct xlog_res {
} xlog_res_t;
typedef struct xlog_ticket {
- sv_t t_sema; /* sleep on this semaphore : 20 */
+ sv_t t_wait; /* ticket wait queue : 20 */
struct xlog_ticket *t_next; /* :4|8 */
struct xlog_ticket *t_prev; /* :4|8 */
xlog_tid_t t_tid; /* transaction identifier : 4 */
@@ -314,7 +314,7 @@ typedef struct xlog_rec_ext_header {
* xlog_rec_header_t into the reserved space.
* - ic_data follows, so a write to disk can start at the beginning of
* the iclog.
- * - ic_forcesema is used to implement synchronous forcing of the iclog to disk.
+ * - ic_force_wait is used to implement synchronous forcing of the iclog to disk.
* - ic_next is the pointer to the next iclog in the ring.
* - ic_bp is a pointer to the buffer used to write this incore log to disk.
* - ic_log is a pointer back to the global log structure.
@@ -339,8 +339,8 @@ typedef struct xlog_rec_ext_header {
* and move everything else out to subsequent cachelines.
*/
typedef struct xlog_iclog_fields {
- sv_t ic_forcesema;
- sv_t ic_writesema;
+ sv_t ic_force_wait;
+ sv_t ic_write_wait;
struct xlog_in_core *ic_next;
struct xlog_in_core *ic_prev;
struct xfs_buf *ic_bp;
@@ -377,8 +377,8 @@ typedef struct xlog_in_core {
/*
* Defines to save our code from this glop.
*/
-#define ic_forcesema hic_fields.ic_forcesema
-#define ic_writesema hic_fields.ic_writesema
+#define ic_force_wait hic_fields.ic_force_wait
+#define ic_write_wait hic_fields.ic_write_wait
#define ic_next hic_fields.ic_next
#define ic_prev hic_fields.ic_prev
#define ic_bp hic_fields.ic_bp
@@ -468,7 +468,7 @@ extern int xlog_find_tail(xlog_t *log,
xfs_daddr_t *head_blk,
xfs_daddr_t *tail_blk);
extern int xlog_recover(xlog_t *log);
-extern int xlog_recover_finish(xlog_t *log, int mfsi_flags);
+extern int xlog_recover_finish(xlog_t *log);
extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
extern void xlog_recover_process_iunlinks(xlog_t *log);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 9eb722ec744e..82d46ce69d5f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3940,8 +3940,7 @@ xlog_recover(
*/
int
xlog_recover_finish(
- xlog_t *log,
- int mfsi_flags)
+ xlog_t *log)
{
/*
* Now we're ready to do the transactions needed for the
@@ -3969,9 +3968,7 @@ xlog_recover_finish(
xfs_log_force(log->l_mp, (xfs_lsn_t)0,
(XFS_LOG_FORCE | XFS_LOG_SYNC));
- if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) {
- xlog_recover_process_iunlinks(log);
- }
+ xlog_recover_process_iunlinks(log);
xlog_recover_check_summary(log);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 6c5d1325e7f6..a4503f5e9497 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -128,7 +128,7 @@ static const struct {
* initialized.
*/
STATIC void
-xfs_mount_free(
+xfs_free_perag(
xfs_mount_t *mp)
{
if (mp->m_perag) {
@@ -139,20 +139,6 @@ xfs_mount_free(
kmem_free(mp->m_perag[agno].pagb_list);
kmem_free(mp->m_perag);
}
-
- spinlock_destroy(&mp->m_ail_lock);
- spinlock_destroy(&mp->m_sb_lock);
- mutex_destroy(&mp->m_ilock);
- mutex_destroy(&mp->m_growlock);
- if (mp->m_quotainfo)
- XFS_QM_DONE(mp);
-
- if (mp->m_fsname != NULL)
- kmem_free(mp->m_fsname);
- if (mp->m_rtname != NULL)
- kmem_free(mp->m_rtname);
- if (mp->m_logname != NULL)
- kmem_free(mp->m_logname);
}
/*
@@ -704,11 +690,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
* Update alignment values based on mount options and sb values
*/
STATIC int
-xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags)
+xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags)
{
xfs_sb_t *sbp = &(mp->m_sb);
- if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
+ if (mp->m_dalign) {
/*
* If stripe unit and stripe width are not multiples
* of the fs blocksize turn off alignment.
@@ -864,7 +850,7 @@ xfs_set_inoalignment(xfs_mount_t *mp)
* Check that the data (and log if separate) are an ok size.
*/
STATIC int
-xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
+xfs_check_sizes(xfs_mount_t *mp)
{
xfs_buf_t *bp;
xfs_daddr_t d;
@@ -887,8 +873,7 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
return error;
}
- if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
- mp->m_logdev_targp != mp->m_ddev_targp) {
+ if (mp->m_logdev_targp != mp->m_ddev_targp) {
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
cmn_err(CE_WARN, "XFS: size check 3 failed");
@@ -923,15 +908,13 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
*/
int
xfs_mountfs(
- xfs_mount_t *mp,
- int mfsi_flags)
+ xfs_mount_t *mp)
{
xfs_sb_t *sbp = &(mp->m_sb);
xfs_inode_t *rip;
__uint64_t resblks;
__int64_t update_flags = 0LL;
uint quotamount, quotaflags;
- int agno;
int uuid_mounted = 0;
int error = 0;
@@ -985,7 +968,7 @@ xfs_mountfs(
* allocator alignment is within an ag, therefore ag has
* to be aligned at stripe boundary.
*/
- error = xfs_update_alignment(mp, mfsi_flags, &update_flags);
+ error = xfs_update_alignment(mp, &update_flags);
if (error)
goto error1;
@@ -1004,8 +987,7 @@ xfs_mountfs(
* since a single partition filesystem is identical to a single
* partition volume/filesystem.
*/
- if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
- (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
+ if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
if (xfs_uuid_mount(mp)) {
error = XFS_ERROR(EINVAL);
goto error1;
@@ -1033,7 +1015,7 @@ xfs_mountfs(
/*
* Check that the data (and log if separate) are an ok size.
*/
- error = xfs_check_sizes(mp, mfsi_flags);
+ error = xfs_check_sizes(mp);
if (error)
goto error1;
@@ -1047,13 +1029,6 @@ xfs_mountfs(
}
/*
- * For client case we are done now
- */
- if (mfsi_flags & XFS_MFSI_CLIENT) {
- return 0;
- }
-
- /*
* Copies the low order bits of the timestamp and the randomly
* set "sequence" number out of a UUID.
*/
@@ -1077,8 +1052,10 @@ xfs_mountfs(
* Allocate and initialize the per-ag data.
*/
init_rwsem(&mp->m_peraglock);
- mp->m_perag =
- kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);
+ mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
+ KM_MAYFAIL);
+ if (!mp->m_perag)
+ goto error1;
mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
@@ -1190,7 +1167,7 @@ xfs_mountfs(
* delayed until after the root and real-time bitmap inodes
* were consistently read in.
*/
- error = xfs_log_mount_finish(mp, mfsi_flags);
+ error = xfs_log_mount_finish(mp);
if (error) {
cmn_err(CE_WARN, "XFS: log mount finish failed");
goto error4;
@@ -1199,7 +1176,7 @@ xfs_mountfs(
/*
* Complete the quota initialisation, post-log-replay component.
*/
- error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags);
+ error = XFS_QM_MOUNT(mp, quotamount, quotaflags);
if (error)
goto error4;
@@ -1233,12 +1210,7 @@ xfs_mountfs(
error3:
xfs_log_unmount_dealloc(mp);
error2:
- for (agno = 0; agno < sbp->sb_agcount; agno++)
- if (mp->m_perag[agno].pagb_list)
- kmem_free(mp->m_perag[agno].pagb_list);
- kmem_free(mp->m_perag);
- mp->m_perag = NULL;
- /* FALLTHROUGH */
+ xfs_free_perag(mp);
error1:
if (uuid_mounted)
uuid_table_remove(&mp->m_sb.sb_uuid);
@@ -1246,16 +1218,17 @@ xfs_mountfs(
}
/*
- * xfs_unmountfs
- *
* This flushes out the inodes, dquots and the superblock, unmounts the
* log and makes sure that incore structures are freed.
*/
-int
-xfs_unmountfs(xfs_mount_t *mp)
+void
+xfs_unmountfs(
+ struct xfs_mount *mp)
{
- __uint64_t resblks;
- int error = 0;
+ __uint64_t resblks;
+ int error;
+
+ IRELE(mp->m_rootip);
/*
* We can potentially deadlock here if we have an inode cluster
@@ -1312,8 +1285,6 @@ xfs_unmountfs(xfs_mount_t *mp)
xfs_unmountfs_wait(mp); /* wait for async bufs */
xfs_log_unmount(mp); /* Done! No more fs ops. */
- xfs_freesb(mp);
-
/*
* All inodes from this mount point should be freed.
*/
@@ -1322,11 +1293,12 @@ xfs_unmountfs(xfs_mount_t *mp)
if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
uuid_table_remove(&mp->m_sb.sb_uuid);
-#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
+#if defined(DEBUG)
xfs_errortag_clearall(mp, 0);
#endif
- xfs_mount_free(mp);
- return 0;
+ xfs_free_perag(mp);
+ if (mp->m_quotainfo)
+ XFS_QM_DONE(mp);
}
STATIC void
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 5269bd6e3df0..f3c1024b1241 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -114,7 +114,7 @@ struct xfs_dqtrxops;
struct xfs_quotainfo;
typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
-typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint, int);
+typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint);
typedef int (*xfs_qmunmount_t)(struct xfs_mount *);
typedef void (*xfs_qmdone_t)(struct xfs_mount *);
typedef void (*xfs_dqrele_t)(struct xfs_dquot *);
@@ -158,8 +158,8 @@ typedef struct xfs_qmops {
#define XFS_QM_INIT(mp, mnt, fl) \
(*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl)
-#define XFS_QM_MOUNT(mp, mnt, fl, mfsi_flags) \
- (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl, mfsi_flags)
+#define XFS_QM_MOUNT(mp, mnt, fl) \
+ (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl)
#define XFS_QM_UNMOUNT(mp) \
(*(mp)->m_qm_ops->xfs_qmunmount)(mp)
#define XFS_QM_DONE(mp) \
@@ -442,13 +442,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
/*
* Flags for xfs_mountfs
*/
-#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */
-#define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */
-/* XFS_MFSI_RRINODES */
-#define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */
- /* log recovery */
-#define XFS_MFSI_NO_QUOTACHECK 0x10 /* Skip quotacheck processing */
-/* XFS_MFSI_CONVERT_SUNIT */
#define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */
#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d)
@@ -517,10 +510,10 @@ typedef struct xfs_mod_sb {
extern void xfs_mod_sb(xfs_trans_t *, __int64_t);
extern int xfs_log_sbcount(xfs_mount_t *, uint);
-extern int xfs_mountfs(xfs_mount_t *mp, int);
+extern int xfs_mountfs(xfs_mount_t *mp);
extern void xfs_mountfs_check_barriers(xfs_mount_t *mp);
-extern int xfs_unmountfs(xfs_mount_t *);
+extern void xfs_unmountfs(xfs_mount_t *);
extern int xfs_unmountfs_writesb(xfs_mount_t *);
extern int xfs_unmount_flush(xfs_mount_t *, int);
extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index bf87a5913504..e2f68de16159 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -74,18 +74,6 @@ STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int,
*/
/*
- * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set.
- */
-STATIC int
-xfs_lowbit32(
- __uint32_t v)
-{
- if (v)
- return ffs(v) - 1;
- return -1;
-}
-
-/*
* Allocate space to the bitmap or summary file, and zero it, for growfs.
*/
STATIC int /* error */
@@ -450,6 +438,7 @@ xfs_rtallocate_extent_near(
}
bbno = XFS_BITTOBLOCK(mp, bno);
i = 0;
+ ASSERT(minlen != 0);
log2len = xfs_highbit32(minlen);
/*
* Loop over all bitmap blocks (bbno + i is current block).
@@ -618,6 +607,8 @@ xfs_rtallocate_extent_size(
xfs_suminfo_t sum; /* summary information for extents */
ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+ ASSERT(maxlen != 0);
+
/*
* Loop over all the levels starting with maxlen.
* At each level, look at all the bitmap blocks, to see if there
@@ -675,6 +666,9 @@ xfs_rtallocate_extent_size(
*rtblock = NULLRTBLOCK;
return 0;
}
+ ASSERT(minlen != 0);
+ ASSERT(maxlen != 0);
+
/*
* Loop over sizes, from maxlen down to minlen.
* This time, when we do the allocations, allow smaller ones
@@ -1961,6 +1955,7 @@ xfs_growfs_rt(
nsbp->sb_blocksize * nsbp->sb_rextsize);
nsbp->sb_rextents = nsbp->sb_rblocks;
do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
+ ASSERT(nsbp->sb_rextents != 0);
nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
nrsumsize =
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index b0f31c09a76d..3a82576dde9a 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -314,7 +314,7 @@ xfs_bioerror_relse(
* ASYNC buffers.
*/
XFS_BUF_ERROR(bp, EIO);
- XFS_BUF_V_IODONESEMA(bp);
+ XFS_BUF_FINISH_IOWAIT(bp);
} else {
xfs_buf_relse(bp);
}
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index e4ebddd3c500..4e1c22a23be5 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -43,6 +43,7 @@
#include "xfs_quota.h"
#include "xfs_trans_priv.h"
#include "xfs_trans_space.h"
+#include "xfs_inode_item.h"
STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *);
@@ -253,7 +254,7 @@ _xfs_trans_alloc(
tp->t_mountp = mp;
tp->t_items_free = XFS_LIC_NUM_SLOTS;
tp->t_busy_free = XFS_LBC_NUM_SLOTS;
- XFS_LIC_INIT(&(tp->t_items));
+ xfs_lic_init(&(tp->t_items));
XFS_LBC_INIT(&(tp->t_busy));
return tp;
}
@@ -282,7 +283,7 @@ xfs_trans_dup(
ntp->t_mountp = tp->t_mountp;
ntp->t_items_free = XFS_LIC_NUM_SLOTS;
ntp->t_busy_free = XFS_LBC_NUM_SLOTS;
- XFS_LIC_INIT(&(ntp->t_items));
+ xfs_lic_init(&(ntp->t_items));
XFS_LBC_INIT(&(ntp->t_busy));
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -1169,7 +1170,7 @@ xfs_trans_cancel(
while (licp != NULL) {
lidp = licp->lic_descs;
for (i = 0; i < licp->lic_unused; i++, lidp++) {
- if (XFS_LIC_ISFREE(licp, i)) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
@@ -1216,6 +1217,68 @@ xfs_trans_free(
kmem_zone_free(xfs_trans_zone, tp);
}
+/*
+ * Roll from one trans in the sequence of PERMANENT transactions to
+ * the next: permanent transactions are only flushed out when
+ * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want to let
+ * chunks of it go to the log as soon as possible. So we commit the
+ * chunk we've been working on and get a new transaction to continue.
+ */
+int
+xfs_trans_roll(
+ struct xfs_trans **tpp,
+ struct xfs_inode *dp)
+{
+ struct xfs_trans *trans;
+ unsigned int logres, count;
+ int error;
+
+ /*
+ * Ensure that the inode is always logged.
+ */
+ trans = *tpp;
+ xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
+
+ /*
+ * Copy the critical parameters from one trans to the next.
+ */
+ logres = trans->t_log_res;
+ count = trans->t_log_count;
+ *tpp = xfs_trans_dup(trans);
+
+ /*
+ * Commit the current transaction.
+ * If this commit failed, then it'd just unlock those items that
+ * are not marked ihold. That also means that a filesystem shutdown
+ * is in progress. The caller takes the responsibility to cancel
+ * the duplicate transaction that gets returned.
+ */
+ error = xfs_trans_commit(trans, 0);
+ if (error)
+ return (error);
+
+ trans = *tpp;
+
+ /*
+ * Reserve space in the log for the next transaction.
+ * This also pushes items in the "AIL", the list of logged items,
+ * out to disk if they are taking up space at the tail of the log
+ * that we want to use. This requires that either nothing be locked
+ * across this call, or that anything that is locked be logged in
+ * the prior and the next transactions.
+ */
+ error = xfs_trans_reserve(trans, 0, logres, 0,
+ XFS_TRANS_PERM_LOG_RES, count);
+ /*
+ * Ensure that the inode is in the new transaction and locked.
+ */
+ if (error)
+ return error;
+
+ xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(trans, dp);
+ return 0;
+}
/*
* THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
@@ -1253,7 +1316,7 @@ xfs_trans_committed(
* Special case the chunk embedded in the transaction.
*/
licp = &(tp->t_items);
- if (!(XFS_LIC_ARE_ALL_FREE(licp))) {
+ if (!(xfs_lic_are_all_free(licp))) {
xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
}
@@ -1262,7 +1325,7 @@ xfs_trans_committed(
*/
licp = licp->lic_next;
while (licp != NULL) {
- ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+ ASSERT(!xfs_lic_are_all_free(licp));
xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
next_licp = licp->lic_next;
kmem_free(licp);
@@ -1325,7 +1388,7 @@ xfs_trans_chunk_committed(
lidp = licp->lic_descs;
for (i = 0; i < licp->lic_unused; i++, lidp++) {
- if (XFS_LIC_ISFREE(licp, i)) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
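The block comment above xfs_trans_roll() explains the rolling pattern for permanent log reservations, but no caller appears in this hunk. The sketch below is a hypothetical caller, not taken from this patch: do_one_chunk() is a placeholder for the real per-chunk modification, and, as the comment notes, on error the caller still owns and must cancel the transaction returned in *tpp.

/*
 * Hypothetical caller of xfs_trans_roll(): perform a large change in
 * chunks inside one permanent reservation, committing and re-reserving
 * between chunks so the tail of the log can keep moving.
 */
STATIC int
xfs_example_roll_loop(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			nchunks)
{
	int			error = 0;
	int			i;

	for (i = 0; i < nchunks; i++) {
		error = do_one_chunk(*tpp, ip);		/* placeholder helper */
		if (error)
			break;
		/*
		 * Commit this chunk and continue in a fresh transaction;
		 * the inode stays locked, joined and held across the roll.
		 */
		error = xfs_trans_roll(tpp, ip);
		if (error)
			break;
	}
	/* on error the caller must still xfs_trans_cancel(*tpp, ...) */
	return error;
}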
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 0804207c7391..74c80bd2b0ec 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -210,62 +210,52 @@ typedef struct xfs_log_item_chunk {
* lic_unused to the right value (0 matches all free). The
* lic_descs.lid_index values are set up as each desc is allocated.
*/
-#define XFS_LIC_INIT(cp) xfs_lic_init(cp)
static inline void xfs_lic_init(xfs_log_item_chunk_t *cp)
{
cp->lic_free = XFS_LIC_FREEMASK;
}
-#define XFS_LIC_INIT_SLOT(cp,slot) xfs_lic_init_slot(cp, slot)
static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot)
{
cp->lic_descs[slot].lid_index = (unsigned char)(slot);
}
-#define XFS_LIC_VACANCY(cp) xfs_lic_vacancy(cp)
static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp)
{
return cp->lic_free & XFS_LIC_FREEMASK;
}
-#define XFS_LIC_ALL_FREE(cp) xfs_lic_all_free(cp)
static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp)
{
cp->lic_free = XFS_LIC_FREEMASK;
}
-#define XFS_LIC_ARE_ALL_FREE(cp) xfs_lic_are_all_free(cp)
static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp)
{
return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK);
}
-#define XFS_LIC_ISFREE(cp,slot) xfs_lic_isfree(cp,slot)
static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot)
{
return (cp->lic_free & (1 << slot));
}
-#define XFS_LIC_CLAIM(cp,slot) xfs_lic_claim(cp,slot)
static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot)
{
cp->lic_free &= ~(1 << slot);
}
-#define XFS_LIC_RELSE(cp,slot) xfs_lic_relse(cp,slot)
static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot)
{
cp->lic_free |= 1 << slot;
}
-#define XFS_LIC_SLOT(cp,slot) xfs_lic_slot(cp,slot)
static inline xfs_log_item_desc_t *
xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot)
{
return &(cp->lic_descs[slot]);
}
-#define XFS_LIC_DESC_TO_SLOT(dp) xfs_lic_desc_to_slot(dp)
static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
{
return (uint)dp->lid_index;
@@ -278,7 +268,6 @@ static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
* All of this yields the address of the chunk, which is
* cast to a chunk pointer.
*/
-#define XFS_LIC_DESC_TO_CHUNK(dp) xfs_lic_desc_to_chunk(dp)
static inline xfs_log_item_chunk_t *
xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
{
@@ -986,6 +975,7 @@ int _xfs_trans_commit(xfs_trans_t *,
int *);
#define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL)
void xfs_trans_cancel(xfs_trans_t *, int);
+int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
int xfs_trans_ail_init(struct xfs_mount *);
void xfs_trans_ail_destroy(struct xfs_mount *);
void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index cb0c5839154b..4e855b5ced66 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -1021,16 +1021,16 @@ xfs_trans_buf_item_match(
bp = NULL;
len = BBTOB(len);
licp = &tp->t_items;
- if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+ if (!xfs_lic_are_all_free(licp)) {
for (i = 0; i < licp->lic_unused; i++) {
/*
* Skip unoccupied slots.
*/
- if (XFS_LIC_ISFREE(licp, i)) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
- lidp = XFS_LIC_SLOT(licp, i);
+ lidp = xfs_lic_slot(licp, i);
blip = (xfs_buf_log_item_t *)lidp->lid_item;
if (blip->bli_item.li_type != XFS_LI_BUF) {
continue;
@@ -1074,7 +1074,7 @@ xfs_trans_buf_item_match_all(
bp = NULL;
len = BBTOB(len);
for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
- if (XFS_LIC_ARE_ALL_FREE(licp)) {
+ if (xfs_lic_are_all_free(licp)) {
ASSERT(licp == &tp->t_items);
ASSERT(licp->lic_next == NULL);
return NULL;
@@ -1083,11 +1083,11 @@ xfs_trans_buf_item_match_all(
/*
* Skip unoccupied slots.
*/
- if (XFS_LIC_ISFREE(licp, i)) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
- lidp = XFS_LIC_SLOT(licp, i);
+ lidp = xfs_lic_slot(licp, i);
blip = (xfs_buf_log_item_t *)lidp->lid_item;
if (blip->bli_item.li_type != XFS_LI_BUF) {
continue;
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
index db5c83595526..3c666e8317f8 100644
--- a/fs/xfs/xfs_trans_item.c
+++ b/fs/xfs/xfs_trans_item.c
@@ -53,11 +53,11 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
* Initialize the chunk, and then
* claim the first slot in the newly allocated chunk.
*/
- XFS_LIC_INIT(licp);
- XFS_LIC_CLAIM(licp, 0);
+ xfs_lic_init(licp);
+ xfs_lic_claim(licp, 0);
licp->lic_unused = 1;
- XFS_LIC_INIT_SLOT(licp, 0);
- lidp = XFS_LIC_SLOT(licp, 0);
+ xfs_lic_init_slot(licp, 0);
+ lidp = xfs_lic_slot(licp, 0);
/*
* Link in the new chunk and update the free count.
@@ -88,14 +88,14 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
*/
licp = &tp->t_items;
while (licp != NULL) {
- if (XFS_LIC_VACANCY(licp)) {
+ if (xfs_lic_vacancy(licp)) {
if (licp->lic_unused <= XFS_LIC_MAX_SLOT) {
i = licp->lic_unused;
- ASSERT(XFS_LIC_ISFREE(licp, i));
+ ASSERT(xfs_lic_isfree(licp, i));
break;
}
for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) {
- if (XFS_LIC_ISFREE(licp, i))
+ if (xfs_lic_isfree(licp, i))
break;
}
ASSERT(i <= XFS_LIC_MAX_SLOT);
@@ -108,12 +108,12 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
* If we find a free descriptor, claim it,
* initialize it, and return it.
*/
- XFS_LIC_CLAIM(licp, i);
+ xfs_lic_claim(licp, i);
if (licp->lic_unused <= i) {
licp->lic_unused = i + 1;
- XFS_LIC_INIT_SLOT(licp, i);
+ xfs_lic_init_slot(licp, i);
}
- lidp = XFS_LIC_SLOT(licp, i);
+ lidp = xfs_lic_slot(licp, i);
tp->t_items_free--;
lidp->lid_item = lip;
lidp->lid_flags = 0;
@@ -136,9 +136,9 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
xfs_log_item_chunk_t *licp;
xfs_log_item_chunk_t **licpp;
- slot = XFS_LIC_DESC_TO_SLOT(lidp);
- licp = XFS_LIC_DESC_TO_CHUNK(lidp);
- XFS_LIC_RELSE(licp, slot);
+ slot = xfs_lic_desc_to_slot(lidp);
+ licp = xfs_lic_desc_to_chunk(lidp);
+ xfs_lic_relse(licp, slot);
lidp->lid_item->li_desc = NULL;
tp->t_items_free++;
@@ -154,7 +154,7 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
* Also decrement the transaction structure's count of free items
* by the number in a chunk since we are freeing an empty chunk.
*/
- if (XFS_LIC_ARE_ALL_FREE(licp) && (licp != &(tp->t_items))) {
+ if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) {
licpp = &(tp->t_items.lic_next);
while (*licpp != licp) {
ASSERT(*licpp != NULL);
@@ -207,20 +207,20 @@ xfs_trans_first_item(xfs_trans_t *tp)
/*
* If it's not in the first chunk, skip to the second.
*/
- if (XFS_LIC_ARE_ALL_FREE(licp)) {
+ if (xfs_lic_are_all_free(licp)) {
licp = licp->lic_next;
}
/*
* Return the first non-free descriptor in the chunk.
*/
- ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+ ASSERT(!xfs_lic_are_all_free(licp));
for (i = 0; i < licp->lic_unused; i++) {
- if (XFS_LIC_ISFREE(licp, i)) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
- return XFS_LIC_SLOT(licp, i);
+ return xfs_lic_slot(licp, i);
}
cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item");
return NULL;
@@ -242,18 +242,18 @@ xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
xfs_log_item_chunk_t *licp;
int i;
- licp = XFS_LIC_DESC_TO_CHUNK(lidp);
+ licp = xfs_lic_desc_to_chunk(lidp);
/*
* First search the rest of the chunk. The for loop keeps us
* from referencing things beyond the end of the chunk.
*/
- for (i = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1; i < licp->lic_unused; i++) {
- if (XFS_LIC_ISFREE(licp, i)) {
+ for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
- return XFS_LIC_SLOT(licp, i);
+ return xfs_lic_slot(licp, i);
}
/*
@@ -266,13 +266,13 @@ xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
}
licp = licp->lic_next;
- ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+ ASSERT(!xfs_lic_are_all_free(licp));
for (i = 0; i < licp->lic_unused; i++) {
- if (XFS_LIC_ISFREE(licp, i)) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
- return XFS_LIC_SLOT(licp, i);
+ return xfs_lic_slot(licp, i);
}
ASSERT(0);
/* NOTREACHED */
@@ -300,9 +300,9 @@ xfs_trans_free_items(
/*
* Special case the embedded chunk so we don't free it below.
*/
- if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+ if (!xfs_lic_are_all_free(licp)) {
(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
- XFS_LIC_ALL_FREE(licp);
+ xfs_lic_all_free(licp);
licp->lic_unused = 0;
}
licp = licp->lic_next;
@@ -311,7 +311,7 @@ xfs_trans_free_items(
* Unlock each item in each chunk and free the chunks.
*/
while (licp != NULL) {
- ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+ ASSERT(!xfs_lic_are_all_free(licp));
(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
next_licp = licp->lic_next;
kmem_free(licp);
@@ -347,7 +347,7 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
/*
* Special case the embedded chunk so we don't free.
*/
- if (!XFS_LIC_ARE_ALL_FREE(licp)) {
+ if (!xfs_lic_are_all_free(licp)) {
freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
}
licpp = &(tp->t_items.lic_next);
@@ -358,10 +358,10 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
* and free empty chunks.
*/
while (licp != NULL) {
- ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
+ ASSERT(!xfs_lic_are_all_free(licp));
freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
next_licp = licp->lic_next;
- if (XFS_LIC_ARE_ALL_FREE(licp)) {
+ if (xfs_lic_are_all_free(licp)) {
*licpp = next_licp;
kmem_free(licp);
freed -= XFS_LIC_NUM_SLOTS;
@@ -402,7 +402,7 @@ xfs_trans_unlock_chunk(
freed = 0;
lidp = licp->lic_descs;
for (i = 0; i < licp->lic_unused; i++, lidp++) {
- if (XFS_LIC_ISFREE(licp, i)) {
+ if (xfs_lic_isfree(licp, i)) {
continue;
}
lip = lidp->lid_item;
@@ -421,7 +421,7 @@ xfs_trans_unlock_chunk(
*/
if (!(freeing_chunk) &&
(!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) {
- XFS_LIC_RELSE(licp, i);
+ xfs_lic_relse(licp, i);
freed++;
}
}
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 98e5f110ba5f..35d4d414bcc2 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -237,7 +237,7 @@ xfs_droplink(
ASSERT (ip->i_d.di_nlink > 0);
ip->i_d.di_nlink--;
- drop_nlink(ip->i_vnode);
+ drop_nlink(VFS_I(ip));
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = 0;
@@ -301,7 +301,7 @@ xfs_bumplink(
ASSERT(ip->i_d.di_nlink > 0);
ip->i_d.di_nlink++;
- inc_nlink(ip->i_vnode);
+ inc_nlink(VFS_I(ip));
if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) &&
(ip->i_d.di_nlink > XFS_MAXLINK_1)) {
/*
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index f316cb85d8e2..ef321225d269 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -18,9 +18,6 @@
#ifndef __XFS_UTILS_H__
#define __XFS_UTILS_H__
-#define IRELE(ip) VN_RELE(XFS_ITOV(ip))
-#define IHOLD(ip) VN_HOLD(XFS_ITOV(ip))
-
extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
xfs_dev_t, cred_t *, prid_t, int,
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 4a9a43315a86..439dd3939dda 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -128,7 +128,6 @@ xfs_unmount_flush(
xfs_inode_t *rip = mp->m_rootip;
xfs_inode_t *rbmip;
xfs_inode_t *rsumip = NULL;
- bhv_vnode_t *rvp = XFS_ITOV(rip);
int error;
xfs_ilock(rip, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
@@ -146,7 +145,7 @@ xfs_unmount_flush(
if (error == EFSCORRUPTED)
goto fscorrupt_out;
- ASSERT(vn_count(XFS_ITOV(rbmip)) == 1);
+ ASSERT(vn_count(VFS_I(rbmip)) == 1);
rsumip = mp->m_rsumip;
xfs_ilock(rsumip, XFS_ILOCK_EXCL);
@@ -157,7 +156,7 @@ xfs_unmount_flush(
if (error == EFSCORRUPTED)
goto fscorrupt_out;
- ASSERT(vn_count(XFS_ITOV(rsumip)) == 1);
+ ASSERT(vn_count(VFS_I(rsumip)) == 1);
}
/*
@@ -167,7 +166,7 @@ xfs_unmount_flush(
if (error == EFSCORRUPTED)
goto fscorrupt_out2;
- if (vn_count(rvp) != 1 && !relocation) {
+ if (vn_count(VFS_I(rip)) != 1 && !relocation) {
xfs_iunlock(rip, XFS_ILOCK_EXCL);
return XFS_ERROR(EBUSY);
}
@@ -284,7 +283,7 @@ xfs_sync_inodes(
int *bypassed)
{
xfs_inode_t *ip = NULL;
- bhv_vnode_t *vp = NULL;
+ struct inode *vp = NULL;
int error;
int last_error;
uint64_t fflag;
@@ -404,7 +403,7 @@ xfs_sync_inodes(
continue;
}
- vp = XFS_ITOV_NULL(ip);
+ vp = VFS_I(ip);
/*
* If the vnode is gone then this is being torn down,
@@ -479,7 +478,7 @@ xfs_sync_inodes(
IPOINTER_INSERT(ip, mp);
xfs_ilock(ip, lock_flags);
- ASSERT(vp == XFS_ITOV(ip));
+ ASSERT(vp == VFS_I(ip));
ASSERT(ip->i_mount == mp);
vnode_refed = B_TRUE;
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 76a1166af822..aa238c8fbd7a 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -83,7 +83,7 @@ xfs_setattr(
cred_t *credp)
{
xfs_mount_t *mp = ip->i_mount;
- struct inode *inode = XFS_ITOV(ip);
+ struct inode *inode = VFS_I(ip);
int mask = iattr->ia_valid;
xfs_trans_t *tp;
int code;
@@ -182,7 +182,7 @@ xfs_setattr(
xfs_ilock(ip, lock_flags);
/* boolean: are we the file owner? */
- file_owner = (current_fsuid(credp) == ip->i_d.di_uid);
+ file_owner = (current_fsuid() == ip->i_d.di_uid);
/*
* Change various properties of a file.
@@ -513,7 +513,6 @@ xfs_setattr(
ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
ip->i_update_core = 1;
- timeflags &= ~XFS_ICHGTIME_ACC;
}
if (mask & ATTR_MTIME) {
inode->i_mtime = iattr->ia_mtime;
@@ -714,7 +713,7 @@ xfs_fsync(
return XFS_ERROR(EIO);
/* capture size updates in I/O completion before writing the inode. */
- error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+ error = filemap_fdatawait(VFS_I(ip)->i_mapping);
if (error)
return XFS_ERROR(error);
@@ -1160,7 +1159,6 @@ int
xfs_release(
xfs_inode_t *ip)
{
- bhv_vnode_t *vp = XFS_ITOV(ip);
xfs_mount_t *mp = ip->i_mount;
int error;
@@ -1195,13 +1193,13 @@ xfs_release(
* be exposed to that problem.
*/
truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
- if (truncated && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
+ if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
}
if (ip->i_d.di_nlink != 0) {
if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
- ((ip->i_size > 0) || (VN_CACHED(vp) > 0 ||
+ ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
ip->i_delayed_blks > 0)) &&
(ip->i_df.if_flags & XFS_IFEXTENTS)) &&
(!(ip->i_d.di_flags &
@@ -1227,7 +1225,6 @@ int
xfs_inactive(
xfs_inode_t *ip)
{
- bhv_vnode_t *vp = XFS_ITOV(ip);
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int committed;
@@ -1242,7 +1239,7 @@ xfs_inactive(
* If the inode is already free, then there can be nothing
* to clean up here.
*/
- if (ip->i_d.di_mode == 0 || VN_BAD(vp)) {
+ if (ip->i_d.di_mode == 0 || VN_BAD(VFS_I(ip))) {
ASSERT(ip->i_df.if_real_bytes == 0);
ASSERT(ip->i_df.if_broot_bytes == 0);
return VN_INACTIVE_CACHE;
@@ -1272,7 +1269,7 @@ xfs_inactive(
if (ip->i_d.di_nlink != 0) {
if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
- ((ip->i_size > 0) || (VN_CACHED(vp) > 0 ||
+ ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
ip->i_delayed_blks > 0)) &&
(ip->i_df.if_flags & XFS_IFEXTENTS) &&
(!(ip->i_d.di_flags &
@@ -1536,7 +1533,7 @@ xfs_create(
* Make sure that we have allocated dquot(s) on disk.
*/
error = XFS_QM_DQVOPALLOC(mp, dp,
- current_fsuid(credp), current_fsgid(credp), prid,
+ current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
@@ -1708,111 +1705,6 @@ std_return:
}
#ifdef DEBUG
-/*
- * Some counters to see if (and how often) we are hitting some deadlock
- * prevention code paths.
- */
-
-int xfs_rm_locks;
-int xfs_rm_lock_delays;
-int xfs_rm_attempts;
-#endif
-
-/*
- * The following routine will lock the inodes associated with the
- * directory and the named entry in the directory. The locks are
- * acquired in increasing inode number.
- *
- * If the entry is "..", then only the directory is locked. The
- * vnode ref count will still include that from the .. entry in
- * this case.
- *
- * There is a deadlock we need to worry about. If the locked directory is
- * in the AIL, it might be blocking up the log. The next inode we lock
- * could be already locked by another thread waiting for log space (e.g
- * a permanent log reservation with a long running transaction (see
- * xfs_itruncate_finish)). To solve this, we must check if the directory
- * is in the ail and use lock_nowait. If we can't lock, we need to
- * drop the inode lock on the directory and try again. xfs_iunlock will
- * potentially push the tail if we were holding up the log.
- */
-STATIC int
-xfs_lock_dir_and_entry(
- xfs_inode_t *dp,
- xfs_inode_t *ip) /* inode of entry 'name' */
-{
- int attempts;
- xfs_ino_t e_inum;
- xfs_inode_t *ips[2];
- xfs_log_item_t *lp;
-
-#ifdef DEBUG
- xfs_rm_locks++;
-#endif
- attempts = 0;
-
-again:
- xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
-
- e_inum = ip->i_ino;
-
- xfs_itrace_ref(ip);
-
- /*
- * We want to lock in increasing inum. Since we've already
- * acquired the lock on the directory, we may need to release
- * if if the inum of the entry turns out to be less.
- */
- if (e_inum > dp->i_ino) {
- /*
- * We are already in the right order, so just
- * lock on the inode of the entry.
- * We need to use nowait if dp is in the AIL.
- */
-
- lp = (xfs_log_item_t *)dp->i_itemp;
- if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
- if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
- attempts++;
-#ifdef DEBUG
- xfs_rm_attempts++;
-#endif
-
- /*
- * Unlock dp and try again.
- * xfs_iunlock will try to push the tail
- * if the inode is in the AIL.
- */
-
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
- if ((attempts % 5) == 0) {
- delay(1); /* Don't just spin the CPU */
-#ifdef DEBUG
- xfs_rm_lock_delays++;
-#endif
- }
- goto again;
- }
- } else {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- }
- } else if (e_inum < dp->i_ino) {
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
- ips[0] = ip;
- ips[1] = dp;
- xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
- }
- /* else e_inum == dp->i_ino */
- /* This can happen if we're asked to lock /x/..
- * the entry is "..", which is also the parent directory.
- */
-
- return 0;
-}
-
-#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
@@ -1946,6 +1838,45 @@ again:
#endif
}
+void
+xfs_lock_two_inodes(
+ xfs_inode_t *ip0,
+ xfs_inode_t *ip1,
+ uint lock_mode)
+{
+ xfs_inode_t *temp;
+ int attempts = 0;
+ xfs_log_item_t *lp;
+
+ ASSERT(ip0->i_ino != ip1->i_ino);
+
+ if (ip0->i_ino > ip1->i_ino) {
+ temp = ip0;
+ ip0 = ip1;
+ ip1 = temp;
+ }
+
+ again:
+ xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
+
+ /*
+ * If the first lock we have locked is in the AIL, we must TRY to get
+ * the second lock. If we can't get it, we must release the first one
+ * and try again.
+ */
+ lp = (xfs_log_item_t *)ip0->i_itemp;
+ if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+ if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
+ xfs_iunlock(ip0, lock_mode);
+ if ((++attempts % 5) == 0)
+ delay(1); /* Don't just spin the CPU */
+ goto again;
+ }
+ } else {
+ xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
+ }
+}
+
int
xfs_remove(
xfs_inode_t *dp,
@@ -2018,9 +1949,7 @@ xfs_remove(
goto out_trans_cancel;
}
- error = xfs_lock_dir_and_entry(dp, ip);
- if (error)
- goto out_trans_cancel;
+ xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
/*
* At this point, we've gotten both the directory and the entry
@@ -2047,9 +1976,6 @@ xfs_remove(
}
}
- /*
- * Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
- */
XFS_BMAP_INIT(&free_list, &first_block);
error = xfs_dir_removename(tp, dp, name, ip->i_ino,
&first_block, &free_list, resblks);
@@ -2155,7 +2081,6 @@ xfs_link(
{
xfs_mount_t *mp = tdp->i_mount;
xfs_trans_t *tp;
- xfs_inode_t *ips[2];
int error;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
@@ -2203,15 +2128,7 @@ xfs_link(
goto error_return;
}
- if (sip->i_ino < tdp->i_ino) {
- ips[0] = sip;
- ips[1] = tdp;
- } else {
- ips[0] = tdp;
- ips[1] = sip;
- }
-
- xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
/*
* Increment vnode ref counts since xfs_trans_commit &
@@ -2352,7 +2269,7 @@ xfs_mkdir(
* Make sure that we have allocated dquot(s) on disk.
*/
error = XFS_QM_DQVOPALLOC(mp, dp,
- current_fsuid(credp), current_fsgid(credp), prid,
+ current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
@@ -2578,7 +2495,7 @@ xfs_symlink(
* Make sure that we have allocated dquot(s) on disk.
*/
error = XFS_QM_DQVOPALLOC(mp, dp,
- current_fsuid(credp), current_fsgid(credp), prid,
+ current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
@@ -2873,14 +2790,13 @@ int
xfs_reclaim(
xfs_inode_t *ip)
{
- bhv_vnode_t *vp = XFS_ITOV(ip);
xfs_itrace_entry(ip);
- ASSERT(!VN_MAPPED(vp));
+ ASSERT(!VN_MAPPED(VFS_I(ip)));
/* bad inode, get out here ASAP */
- if (VN_BAD(vp)) {
+ if (VN_BAD(VFS_I(ip))) {
xfs_ireclaim(ip);
return 0;
}
@@ -2917,7 +2833,7 @@ xfs_reclaim(
XFS_MOUNT_ILOCK(mp);
spin_lock(&ip->i_flags_lock);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
- vn_to_inode(vp)->i_private = NULL;
+ VFS_I(ip)->i_private = NULL;
ip->i_vnode = NULL;
spin_unlock(&ip->i_flags_lock);
list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
@@ -2933,7 +2849,7 @@ xfs_finish_reclaim(
int sync_mode)
{
xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
- bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
+ struct inode *vp = VFS_I(ip);
if (vp && VN_BAD(vp))
goto reclaim;
@@ -3321,7 +3237,6 @@ xfs_free_file_space(
xfs_off_t len,
int attr_flags)
{
- bhv_vnode_t *vp;
int committed;
int done;
xfs_off_t end_dmi_offset;
@@ -3341,7 +3256,6 @@ xfs_free_file_space(
xfs_trans_t *tp;
int need_iolock = 1;
- vp = XFS_ITOV(ip);
mp = ip->i_mount;
xfs_itrace_entry(ip);
@@ -3378,7 +3292,7 @@ xfs_free_file_space(
rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
ioffset = offset & ~(rounding - 1);
- if (VN_CACHED(vp) != 0) {
+ if (VN_CACHED(VFS_I(ip)) != 0) {
xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
if (error)
diff --git a/include/acpi/acnamesp.h b/include/acpi/acnamesp.h
index 9ed70a050580..c34008507b69 100644
--- a/include/acpi/acnamesp.h
+++ b/include/acpi/acnamesp.h
@@ -182,7 +182,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
*/
u32 acpi_ns_opens_scope(acpi_object_type type);
-void
+acpi_status
acpi_ns_build_external_path(struct acpi_namespace_node *node,
acpi_size size, char *name_buffer);
diff --git a/include/asm-arm/plat-s3c/regs-nand.h b/include/asm-arm/plat-s3c/regs-nand.h
index 09f0b5503f5b..b2caa4bca270 100644
--- a/include/asm-arm/plat-s3c/regs-nand.h
+++ b/include/asm-arm/plat-s3c/regs-nand.h
@@ -11,7 +11,7 @@
*/
#ifndef __ASM_ARM_REGS_NAND
-#define __ASM_ARM_REGS_NAND "$Id: nand.h,v 1.3 2003/12/09 11:36:29 ben Exp $"
+#define __ASM_ARM_REGS_NAND
#define S3C2410_NFREG(x) (x)
diff --git a/include/asm-arm/plat-s3c/regs-timer.h b/include/asm-arm/plat-s3c/regs-timer.h
index b4366ea39677..cc0eedd53e38 100644
--- a/include/asm-arm/plat-s3c/regs-timer.h
+++ b/include/asm-arm/plat-s3c/regs-timer.h
@@ -12,7 +12,7 @@
#ifndef __ASM_ARCH_REGS_TIMER_H
-#define __ASM_ARCH_REGS_TIMER_H "$Id: timer.h,v 1.4 2003/05/06 19:30:50 ben Exp $"
+#define __ASM_ARCH_REGS_TIMER_H
#define S3C_TIMERREG(x) (S3C_VA_TIMER + (x))
#define S3C_TIMERREG2(tmr,reg) S3C_TIMERREG((reg)+0x0c+((tmr)*0x0c))
diff --git a/include/asm-arm/plat-s3c/regs-watchdog.h b/include/asm-arm/plat-s3c/regs-watchdog.h
index 1229f076c0a0..4938492470f7 100644
--- a/include/asm-arm/plat-s3c/regs-watchdog.h
+++ b/include/asm-arm/plat-s3c/regs-watchdog.h
@@ -12,7 +12,7 @@
#ifndef __ASM_ARCH_REGS_WATCHDOG_H
-#define __ASM_ARCH_REGS_WATCHDOG_H "$Id: watchdog.h,v 1.2 2003/04/29 13:31:09 ben Exp $"
+#define __ASM_ARCH_REGS_WATCHDOG_H
#define S3C_WDOGREG(x) ((x) + S3C_VA_WATCHDOG)
diff --git a/include/asm-arm/plat-s3c24xx/s3c2410.h b/include/asm-arm/plat-s3c24xx/s3c2410.h
index 36de0b835873..3cd1ec677b3f 100644
--- a/include/asm-arm/plat-s3c24xx/s3c2410.h
+++ b/include/asm-arm/plat-s3c24xx/s3c2410.h
@@ -21,11 +21,11 @@ extern void s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no);
extern void s3c2410_init_clocks(int xtal);
-extern int s3c2410_baseclk_add(void);
-
#else
#define s3c2410_init_clocks NULL
#define s3c2410_init_uarts NULL
#define s3c2410_map_io NULL
#define s3c2410_init NULL
#endif
+
+extern int s3c2410_baseclk_add(void);
diff --git a/include/asm-blackfin/Kbuild b/include/asm-blackfin/Kbuild
index 71f8fe783258..606ecfdcc962 100644
--- a/include/asm-blackfin/Kbuild
+++ b/include/asm-blackfin/Kbuild
@@ -1,3 +1,3 @@
include include/asm-generic/Kbuild.asm
-header-y += fixed_code.h
+unifdef-y += fixed_code.h
diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h
index 320aa5e167e9..7ba70de66f2b 100644
--- a/include/asm-blackfin/bfin-global.h
+++ b/include/asm-blackfin/bfin-global.h
@@ -56,37 +56,20 @@ extern void dump_bfin_process(struct pt_regs *regs);
extern void dump_bfin_mem(struct pt_regs *regs);
extern void dump_bfin_trace_buffer(void);
+/* init functions only */
extern int init_arch_irq(void);
-extern void bfin_reset(void);
-extern void _cplb_hdr(void);
-/* Blackfin cache functions */
extern void bfin_icache_init(void);
extern void bfin_dcache_init(void);
-extern int read_iloc(void);
-extern int bfin_console_init(void);
-extern asmlinkage void lower_to_irq14(void);
-extern asmlinkage void bfin_return_from_exception(void);
extern void init_exception_vectors(void);
-extern void init_dma(void);
extern void program_IAR(void);
-extern void evt14_softirq(void);
+
+extern void bfin_reset(void);
+extern asmlinkage void lower_to_irq14(void);
+extern asmlinkage void bfin_return_from_exception(void);
+extern asmlinkage void evt14_softirq(void);
extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
-extern void bfin_gpio_interrupt_setup(int irq, int irq_pfx, int type);
extern int bfin_internal_set_wake(unsigned int irq, unsigned int state);
-extern asmlinkage void finish_atomic_sections (struct pt_regs *regs);
-extern char fixed_code_start;
-extern char fixed_code_end;
-extern int atomic_xchg32(void);
-extern int atomic_cas32(void);
-extern int atomic_add32(void);
-extern int atomic_sub32(void);
-extern int atomic_ior32(void);
-extern int atomic_and32(void);
-extern int atomic_xor32(void);
-extern void safe_user_instruction(void);
-extern void sigreturn_stub(void);
-
extern void *l1_data_A_sram_alloc(size_t);
extern void *l1_data_B_sram_alloc(size_t);
extern void *l1_inst_sram_alloc(size_t);
@@ -110,11 +93,10 @@ extern void *sram_alloc_with_lsl(size_t, unsigned long);
extern int sram_free_with_lsl(const void*);
extern const char bfin_board_name[];
-extern unsigned long wall_jiffies;
extern unsigned long bfin_sic_iwr[];
+extern unsigned vr_wakeup;
extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */
-extern struct file_operations dpmc_fops;
extern unsigned long _ramstart, _ramend, _rambase;
extern unsigned long memory_start, memory_end, physical_mem_end;
extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
@@ -122,8 +104,12 @@ extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
_stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
_ebss_l2[], _l2_lma_start[];
-#ifdef CONFIG_MTD_UCLINUX
+/* only used when CONFIG_MTD_UCLINUX */
extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
+
+#ifdef CONFIG_BFIN_ICACHE_LOCK
+extern void cache_grab_lock(int way);
+extern void cache_lock(int way);
#endif
#endif
diff --git a/include/asm-blackfin/dpmc.h b/include/asm-blackfin/dpmc.h
index de28e6e018b3..96e8208f929a 100644
--- a/include/asm-blackfin/dpmc.h
+++ b/include/asm-blackfin/dpmc.h
@@ -11,7 +11,6 @@
#ifndef __ASSEMBLY__
void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
-void deep_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void hibernate_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void do_hibernate(int wakeup);
diff --git a/include/asm-blackfin/fixed_code.h b/include/asm-blackfin/fixed_code.h
index 37db66c7030d..32c4d495d847 100644
--- a/include/asm-blackfin/fixed_code.h
+++ b/include/asm-blackfin/fixed_code.h
@@ -1,6 +1,28 @@
/* This file defines the fixed addresses where userspace programs can find
atomic code sequences. */
+#ifndef __BFIN_ASM_FIXED_CODE_H__
+#define __BFIN_ASM_FIXED_CODE_H__
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
+#include <linux/ptrace.h>
+extern asmlinkage void finish_atomic_sections(struct pt_regs *regs);
+extern char fixed_code_start;
+extern char fixed_code_end;
+extern int atomic_xchg32(void);
+extern int atomic_cas32(void);
+extern int atomic_add32(void);
+extern int atomic_sub32(void);
+extern int atomic_ior32(void);
+extern int atomic_and32(void);
+extern int atomic_xor32(void);
+extern void safe_user_instruction(void);
+extern void sigreturn_stub(void);
+#endif
+#endif
+
#define FIXED_CODE_START 0x400
#define SIGRETURN_STUB 0x400
@@ -20,3 +42,5 @@
#define SAFE_USER_INSTRUCTION 0x480
#define FIXED_CODE_END 0x490
+
+#endif
diff --git a/include/asm-blackfin/mach-bf527/mem_map.h b/include/asm-blackfin/mach-bf527/mem_map.h
index 193082deaa4e..ef46dc991cd4 100644
--- a/include/asm-blackfin/mach-bf527/mem_map.h
+++ b/include/asm-blackfin/mach-bf527/mem_map.h
@@ -89,6 +89,11 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE */
+/* Level 2 Memory - none */
+
+#define L2_START 0
+#define L2_LENGTH 0
+
/* Scratch Pad Memory */
#define L1_SCRATCH_START 0xFFB00000
diff --git a/include/asm-blackfin/mach-bf533/mem_init.h b/include/asm-blackfin/mach-bf533/mem_init.h
index 995c06b2b1ef..ed2034bf10ec 100644
--- a/include/asm-blackfin/mach-bf533/mem_init.h
+++ b/include/asm-blackfin/mach-bf533/mem_init.h
@@ -47,7 +47,7 @@
#define SDRAM_tRCD TRCD_2
#define SDRAM_tWR TWR_2
#endif
-#if (CONFIG_SCLK_HZ > 8955223) && (CONFIG_SCLK_HZ <= 104477612)
+#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
#define SDRAM_tRP TRP_2
#define SDRAM_tRP_num 2
#define SDRAM_tRAS TRAS_5
diff --git a/include/asm-blackfin/mach-bf533/mem_map.h b/include/asm-blackfin/mach-bf533/mem_map.h
index bd30b6f3be00..581fc6eea789 100644
--- a/include/asm-blackfin/mach-bf533/mem_map.h
+++ b/include/asm-blackfin/mach-bf533/mem_map.h
@@ -158,6 +158,11 @@
#endif
+/* Level 2 Memory - none */
+
+#define L2_START 0
+#define L2_LENGTH 0
+
/* Scratch Pad Memory */
#define L1_SCRATCH_START 0xFFB00000
diff --git a/include/asm-blackfin/mach-bf537/mem_map.h b/include/asm-blackfin/mach-bf537/mem_map.h
index 5c6726d6f3b1..5078b669431f 100644
--- a/include/asm-blackfin/mach-bf537/mem_map.h
+++ b/include/asm-blackfin/mach-bf537/mem_map.h
@@ -166,6 +166,11 @@
#endif
+/* Level 2 Memory - none */
+
+#define L2_START 0
+#define L2_LENGTH 0
+
/* Scratch Pad Memory */
#define L1_SCRATCH_START 0xFFB00000
diff --git a/include/asm-blackfin/mach-common/cdef_LPBlackfin.h b/include/asm-blackfin/mach-common/cdef_LPBlackfin.h
index ede210eca4ec..d39c396f850d 100644
--- a/include/asm-blackfin/mach-common/cdef_LPBlackfin.h
+++ b/include/asm-blackfin/mach-common/cdef_LPBlackfin.h
@@ -39,11 +39,7 @@
#define bfin_read_SRAM_BASE_ADDRESS() bfin_read32(SRAM_BASE_ADDRESS)
#define bfin_write_SRAM_BASE_ADDRESS(val) bfin_write32(SRAM_BASE_ADDRESS,val)
#define bfin_read_DMEM_CONTROL() bfin_read32(DMEM_CONTROL)
-#if ANOMALY_05000125
-extern void bfin_write_DMEM_CONTROL(unsigned int val);
-#else
#define bfin_write_DMEM_CONTROL(val) bfin_write32(DMEM_CONTROL,val)
-#endif
#define bfin_read_DCPLB_STATUS() bfin_read32(DCPLB_STATUS)
#define bfin_write_DCPLB_STATUS(val) bfin_write32(DCPLB_STATUS,val)
#define bfin_read_DCPLB_FAULT_ADDR() bfin_read32(DCPLB_FAULT_ADDR)
@@ -129,11 +125,7 @@ extern void bfin_write_DMEM_CONTROL(unsigned int val);
#define DTEST_DATA3 0xFFE0040C
*/
#define bfin_read_IMEM_CONTROL() bfin_read32(IMEM_CONTROL)
-#if ANOMALY_05000125
-extern void bfin_write_IMEM_CONTROL(unsigned int val);
-#else
#define bfin_write_IMEM_CONTROL(val) bfin_write32(IMEM_CONTROL,val)
-#endif
#define bfin_read_ICPLB_STATUS() bfin_read32(ICPLB_STATUS)
#define bfin_write_ICPLB_STATUS(val) bfin_write32(ICPLB_STATUS,val)
#define bfin_read_ICPLB_FAULT_ADDR() bfin_read32(ICPLB_FAULT_ADDR)
diff --git a/include/asm-blackfin/unistd.h b/include/asm-blackfin/unistd.h
index 42955d0c439b..1e57b636e0bc 100644
--- a/include/asm-blackfin/unistd.h
+++ b/include/asm-blackfin/unistd.h
@@ -372,8 +372,14 @@
#define __NR_semtimedop 357
#define __NR_timerfd_settime 358
#define __NR_timerfd_gettime 359
+#define __NR_signalfd4 360
+#define __NR_eventfd2 361
+#define __NR_epoll_create1 362
+#define __NR_dup3 363
+#define __NR_pipe2 364
+#define __NR_inotify_init1 365
-#define __NR_syscall 360
+#define __NR_syscall 366
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
index 864181385579..15828b2d663c 100644
--- a/include/asm-generic/ioctl.h
+++ b/include/asm-generic/ioctl.h
@@ -68,12 +68,16 @@
((nr) << _IOC_NRSHIFT) | \
((size) << _IOC_SIZESHIFT))
+#ifdef __KERNEL__
/* provoke compile error for invalid uses of size argument */
extern unsigned int __invalid_size_argument_for_IOC;
#define _IOC_TYPECHECK(t) \
((sizeof(t) == sizeof(t[1]) && \
sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
sizeof(t) : __invalid_size_argument_for_IOC)
+#else
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#endif
/* used to create numbers */
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
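
A hedged illustration of the userspace fallback above: outside __KERNEL__, _IOC_TYPECHECK(t) now degrades to a plain sizeof(t), so one _IOW definition can be shared between kernel and userspace headers. The magic character, number, and struct below are hypothetical.

#include <linux/ioctl.h>

struct example_cfg {
	int rate;
	int channels;
};

/* In the kernel the size argument goes through _IOC_TYPECHECK and a bad size
 * becomes a link error via __invalid_size_argument_for_IOC; in userspace it is
 * now just sizeof(struct example_cfg), so the macro stays a compile-time constant. */
#define EXAMPLE_IOC_SET_CFG	_IOW('E', 0x01, struct example_cfg)
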
diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h
index cdbab43b7d3a..4314892aaebb 100644
--- a/include/asm-mips/kexec.h
+++ b/include/asm-mips/kexec.h
@@ -16,7 +16,7 @@
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
-#define KEXEC_CONTROL_CODE_SIZE 4096
+#define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_MIPS
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 22aa58ca1991..dcc812067394 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -31,9 +31,6 @@
#define ALIAS_TABLE_ENTRY_SIZE 2
#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
-/* helper macros */
-#define LOW_U32(x) ((x) & ((1ULL << 32)-1))
-
/* Length of the MMIO region for the AMD IOMMU */
#define MMIO_REGION_LENGTH 0x4000
@@ -69,6 +66,9 @@
#define MMIO_EVT_TAIL_OFFSET 0x2018
#define MMIO_STATUS_OFFSET 0x2020
+/* MMIO status bits */
+#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+
/* feature control bits */
#define CONTROL_IOMMU_EN 0x00ULL
#define CONTROL_HT_TUN_EN 0x01ULL
@@ -89,6 +89,7 @@
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_COMPL_WAIT_STORE_MASK 0x01
+#define CMD_COMPL_WAIT_INT_MASK 0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
@@ -99,6 +100,7 @@
#define DEV_ENTRY_TRANSLATION 0x01
#define DEV_ENTRY_IR 0x3d
#define DEV_ENTRY_IW 0x3e
+#define DEV_ENTRY_NO_PAGE_FAULT 0x62
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index a0095191c02e..91c7d03e65bc 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -228,7 +228,7 @@ static inline void atomic64_add(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
- : "ir" (i), "m" (v->counter));
+ : "er" (i), "m" (v->counter));
}
/**
@@ -242,7 +242,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter)
- : "ir" (i), "m" (v->counter));
+ : "er" (i), "m" (v->counter));
}
/**
@@ -260,7 +260,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
: "=m" (v->counter), "=qm" (c)
- : "ir" (i), "m" (v->counter) : "memory");
+ : "er" (i), "m" (v->counter) : "memory");
return c;
}
@@ -341,7 +341,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
: "=m" (v->counter), "=qm" (c)
- : "ir" (i), "m" (v->counter) : "memory");
+ : "er" (i), "m" (v->counter) : "memory");
return c;
}
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index 7ed2bd7a7f51..d4f2b0abe929 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
-extern void *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
#endif /* CONFIG_X86_32 */
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index bb06027fc83e..2c1cda0b8a86 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -50,6 +50,7 @@ extern int geode_get_dev_base(unsigned int dev);
#define MSR_PIC_YSEL_HIGH 0x51400021
#define MSR_PIC_ZSEL_LOW 0x51400022
#define MSR_PIC_ZSEL_HIGH 0x51400023
+#define MSR_PIC_IRQM_LPC 0x51400025
#define MSR_MFGPT_IRQ 0x51400028
#define MSR_MFGPT_NR 0x51400029
@@ -237,7 +238,7 @@ static inline u16 geode_mfgpt_read(int timer, u16 reg)
}
extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
-extern int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable);
+extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
extern int geode_mfgpt_alloc_timer(int timer, int domain);
#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 77ba51df5668..edd0b95f14d0 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -98,9 +98,17 @@ extern void (*const interrupt[NR_IRQS])(void);
#else
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern spinlock_t vector_lock;
#endif
-extern void setup_vector_irq(int cpu);
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+extern void __setup_vector_irq(int cpu);
+#else
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+static inline void __setup_vector_irq(int cpu) {}
+#endif
#endif /* !ASSEMBLY_ */
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 96fa8449ff11..56d00e31aec0 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
+#include <linux/hardirq.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
@@ -62,8 +63,6 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
#else
: [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
- if (unlikely(err))
- init_fpu(current);
return err;
}
@@ -236,6 +235,37 @@ static inline void kernel_fpu_end(void)
preempt_enable();
}
+/*
+ * Some instructions like VIA's padlock instructions generate a spurious
+ * DNA fault but don't modify SSE registers. And these instructions
+ * get used from interrupt context as well. To prevent these kernel instructions
+ * in interrupt context from interacting wrongly with other user/kernel fpu usage, we
+ * should use them only in the context of irq_ts_save()/irq_ts_restore().
+ */
+static inline int irq_ts_save(void)
+{
+ /*
+ * If we are in process context, we are ok to take a spurious DNA fault.
+ * Otherwise, doing clts() in process context requires pre-emption to
+ * be disabled or some heavy lifting like kernel_fpu_begin().
+ */
+ if (!in_interrupt())
+ return 0;
+
+ if (read_cr0() & X86_CR0_TS) {
+ clts();
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void irq_ts_restore(int TS_state)
+{
+ if (TS_state)
+ stts();
+}
+
#ifdef CONFIG_X86_64
static inline void save_init_fpu(struct task_struct *tsk)
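
A minimal sketch of how the irq_ts_save()/irq_ts_restore() pair above is meant to bracket an FPU-touching instruction that may run in interrupt context; the function name is illustrative.

#include <asm/i387.h>

static void example_padlock_op(void)
{
	int ts_state;

	/* clears CR0.TS (if set) only when called from interrupt context */
	ts_state = irq_ts_save();

	/* ... issue the instruction that would otherwise raise a spurious DNA fault ... */

	/* re-sets TS only if irq_ts_save() actually cleared it */
	irq_ts_restore(ts_state);
}
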
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index bf5d629b3a39..0f954dc89cb3 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -21,7 +21,7 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
-{ type ret; asm volatile("mov" size " %1,%0":"=" reg (ret) \
+{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }
#define build_mmio_write(name, size, type, reg, barrier) \
@@ -29,13 +29,13 @@ static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
-build_mmio_read(readb, "b", unsigned char, "q", :"memory")
-build_mmio_read(readw, "w", unsigned short, "r", :"memory")
-build_mmio_read(readl, "l", unsigned int, "r", :"memory")
+build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
+build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
+build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
-build_mmio_read(__readb, "b", unsigned char, "q", )
-build_mmio_read(__readw, "w", unsigned short, "r", )
-build_mmio_read(__readl, "l", unsigned int, "r", )
+build_mmio_read(__readb, "b", unsigned char, "=q", )
+build_mmio_read(__readw, "w", unsigned short, "=r", )
+build_mmio_read(__readl, "l", unsigned int, "=r", )
build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
@@ -59,8 +59,8 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
#define mmiowb() barrier()
#ifdef CONFIG_X86_64
-build_mmio_read(readq, "q", unsigned long, "r", :"memory")
-build_mmio_read(__readq, "q", unsigned long, "r", )
+build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
+build_mmio_read(__readq, "q", unsigned long, "=r", )
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
build_mmio_write(__writeq, "q", unsigned long, "r", )
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index 90b1d1f12f08..a48c7f2dbdc0 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -76,6 +76,7 @@
#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
#define THERMAL_APIC_VECTOR 0xfa
#define THRESHOLD_APIC_VECTOR 0xf9
+#define UV_BAU_MESSAGE 0xf8
#define INVALIDATE_TLB_VECTOR_END 0xf7
#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
@@ -109,7 +110,15 @@
#define LAST_VM86_IRQ 15
#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
-#if !defined(CONFIG_X86_VOYAGER)
+#ifdef CONFIG_X86_64
+# if NR_CPUS < MAX_IO_APICS
+# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
+# else
+# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
+# endif
+# define NR_IRQ_VECTORS NR_IRQS
+
+#elif !defined(CONFIG_X86_VOYAGER)
# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index c0e52a14fd4d..4246ab7dc988 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -41,6 +41,10 @@
# define PAGES_NR 17
#endif
+#ifdef CONFIG_X86_32
+# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/string.h>
@@ -63,7 +67,7 @@
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
-# define KEXEC_CONTROL_CODE_SIZE 4096
+# define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386
@@ -79,7 +83,7 @@
# define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)
/* Allocate one page for the pdp and the second for the code */
-# define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL)
+# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)
/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h
index c1682b542daf..90bc4108a4fd 100644
--- a/include/asm-x86/mman.h
+++ b/include/asm-x86/mman.h
@@ -12,6 +12,7 @@
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
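
A small userspace sketch of the new MAP_STACK flag: it is OR-ed into an anonymous, private mapping that will back a thread stack. The size and follow-up use are illustrative.

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 8 * 1024 * 1024;		/* illustrative 8 MiB stack */
	void *stack = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

	if (stack == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* hand 'stack' to clone() or pthread_attr_setstack() as needed */
	munmap(stack, len);
	return 0;
}
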
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
index 95beda07c6fa..e293ab81e850 100644
--- a/include/asm-x86/mmconfig.h
+++ b/include/asm-x86/mmconfig.h
@@ -3,7 +3,7 @@
#ifdef CONFIG_PCI_MMCONFIG
extern void __cpuinit fam10h_check_enable_mmcfg(void);
-extern void __init check_enable_amd_mmconf_dmi(void);
+extern void __cpuinit check_enable_amd_mmconf_dmi(void);
#else
static inline void fam10h_check_enable_mmcfg(void) { }
static inline void check_enable_amd_mmconf_dmi(void) { }
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index b2298a227567..5862e6460658 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -97,10 +97,16 @@ static inline int pfn_valid(int pfn)
reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
#define alloc_bootmem(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_nopanic(x) \
+ __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
+ __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_nopanic(x) \
+ __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
+ __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 4e91ee1e37aa..f643a3a92da0 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -182,7 +182,7 @@ do { \
DEFINE_PER_CPU(_type, _name) = _initvalue; \
__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
{ [0 ... NR_CPUS-1] = _initvalue }; \
- __typeof__(_type) *_name##_early_ptr = _name##_early_map
+ __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index ac5fff4cc58a..549144d03d99 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -151,7 +151,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
-#define MODULES_END _AC(0xfffffffffff00000, UL)
+#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#ifndef __ASSEMBLY__
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 5f58da401b43..4df3e2f6fb56 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -728,6 +728,29 @@ extern unsigned long boot_option_idle_override;
extern unsigned long idle_halt;
extern unsigned long idle_nomwait;
+/*
+ * On systems with caches, caches must be flushed as the absolute
+ * last instruction before going into a suspended halt. Otherwise,
+ * dirty data can linger in the cache and become stale on resume,
+ * leading to strange errors.
+ *
+ * Perform a variety of operations to guarantee that the compiler
+ * will not reorder instructions. wbinvd itself is serializing,
+ * so the processor will not reorder.
+ *
+ * Systems without cache can just go into halt.
+ */
+static inline void wbinvd_halt(void)
+{
+ mb();
+ /* check for clflush to determine if wbinvd is legal */
+ if (cpu_has_clflush)
+ asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
+ else
+ while (1)
+ halt();
+}
+
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 4f9a9861799a..e39c790dbfd2 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -65,7 +65,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
- return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
+ return (((tmp >> 8) - tmp) & 0xff) > 1;
}
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
@@ -127,7 +127,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
- return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
+ return (((tmp >> 16) - tmp) & 0xffff) > 1;
}
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
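
A standalone arithmetic check (ordinary userspace C, values illustrative) of why the contention test above subtracts before masking: once the 8-bit ticket counters wrap, the old form compares a wrapped head against an unwrapped tail and misreports, while the new form stays correct modulo 256.

#include <stdio.h>

int main(void)
{
	/* packed ticket word: next ticket in bits 8..15, current owner in bits 0..7 */
	int tmp = (0x01 << 8) | 0xff;	/* next = 0x01 (wrapped), owner = 0xff */

	/* old: (1 - 255) > 1 is false, so the lock looks uncontended */
	int old_form = (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;

	/* new: ((1 - 0x1ff) & 0xff) == 2, so the waiter is detected */
	int new_form = (((tmp >> 8) - tmp) & 0xff) > 1;

	printf("old=%d new=%d\n", old_form, new_form);	/* prints old=0 new=1 */
	return 0;
}
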
diff --git a/include/asm-x86/uv/uv_bau.h b/include/asm-x86/uv/uv_bau.h
index 91ac0dfb7588..610b6b308e93 100644
--- a/include/asm-x86/uv/uv_bau.h
+++ b/include/asm-x86/uv/uv_bau.h
@@ -40,11 +40,6 @@
#define UV_ACTIVATION_DESCRIPTOR_SIZE 32
#define UV_DISTRIBUTION_SIZE 256
#define UV_SW_ACK_NPENDING 8
-#define UV_BAU_MESSAGE 200
-/*
- * Messaging irq; see irq_64.h and include/asm-x86/hw_irq_64.h
- * To be dynamically allocated in the future
- */
#define UV_NET_ENDPOINT_INTD 0x38
#define UV_DESC_BASE_PNODE_SHIFT 49
#define UV_PAYLOADQ_PNODE_SHIFT 49
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d12498ec8a4e..ee48ef8fb2ea 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -101,6 +101,24 @@ static inline int crypto_ahash_digest(struct ahash_request *req)
return crt->digest(req);
}
+static inline int crypto_ahash_init(struct ahash_request *req)
+{
+ struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+ return crt->init(req);
+}
+
+static inline int crypto_ahash_update(struct ahash_request *req)
+{
+ struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+ return crt->update(req);
+}
+
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+ struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+ return crt->final(req);
+}
+
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
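
A rough sketch of how the three new entry points chain together for an incremental hash. It assumes the request has already been allocated and bound to a transform with ahash_request_set_tfm(), and that ahash_request_set_crypt() (part of the same ahash API) supplies the data; error handling is abbreviated.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_incremental_digest(struct ahash_request *req,
				      struct scatterlist *sg,
				      unsigned int len, u8 *out)
{
	int err;

	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_ahash_init(req);		/* reset internal state */
	if (err)
		return err;

	err = crypto_ahash_update(req);		/* feed the scatterlist */
	if (err)
		return err;

	return crypto_ahash_final(req);		/* write the digest to 'out' */
}
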
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a26f565e8189..7d970678f940 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -250,6 +250,8 @@ unifdef-y += isdn.h
unifdef-y += isdnif.h
unifdef-y += isdn_divertif.h
unifdef-y += isdn_ppp.h
+unifdef-y += ivtv.h
+unifdef-y += ivtvfb.h
unifdef-y += joystick.h
unifdef-y += kdev_t.h
unifdef-y += kd.h
@@ -356,6 +358,7 @@ unifdef-y += virtio_balloon.h
unifdef-y += virtio_console.h
unifdef-y += virtio_pci.h
unifdef-y += virtio_ring.h
+unifdef-y += virtio_rng.h
unifdef-y += vt.h
unifdef-y += wait.h
unifdef-y += wanrouter.h
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 972b12bcfb36..2b8df8b420fd 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -30,6 +30,8 @@
#ifndef _AGP_BACKEND_H
#define _AGP_BACKEND_H 1
+#include <linux/list.h>
+
enum chipset_type {
NOT_SUPPORTED,
SUPPORTED,
@@ -78,6 +80,8 @@ struct agp_memory {
bool is_bound;
bool is_flushed;
bool vmalloc_flag;
+ /* list of agp_memory mapped to the aperture */
+ struct list_head mapped_list;
};
#define AGP_NORMAL_MEMORY 0
@@ -96,6 +100,7 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t,
extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
extern int agp_bind_memory(struct agp_memory *, off_t);
extern int agp_unbind_memory(struct agp_memory *);
+extern int agp_rebind_memory(void);
extern void agp_enable(struct agp_bridge_data *, u32);
extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
extern void agp_backend_release(struct agp_bridge_data *);
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 1abfe664c444..89781fd48859 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -110,6 +110,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
+extern int bitmap_scnprintf_len(unsigned int nr_bits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 652470b687c9..95837bfb5256 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -97,10 +97,14 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_nopanic(x) \
+ __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_nopanic(x) \
+ __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
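
A short sketch of where the new _nopanic variants help: unlike alloc_bootmem(), they return NULL instead of panicking, so an early caller can degrade gracefully. The function and sizes are illustrative.

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>

void __init example_reserve_early_buffer(unsigned long size)
{
	void *buf = alloc_bootmem_nopanic(size);

	if (!buf) {
		printk(KERN_WARNING "example: %lu bytes unavailable, halving\n", size);
		buf = alloc_bootmem_pages_nopanic(size / 2);
	}
	/* ... */
}
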
diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h
new file mode 100644
index 000000000000..29f002d73d98
--- /dev/null
+++ b/include/linux/byteorder.h
@@ -0,0 +1,372 @@
+#ifndef _LINUX_BYTEORDER_H
+#define _LINUX_BYTEORDER_H
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)
+# error Fix asm/byteorder.h to define one endianness
+#endif
+
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+# error Fix asm/byteorder.h to define arch endianness
+#endif
+
+#ifdef __LITTLE_ENDIAN
+# undef __LITTLE_ENDIAN
+# define __LITTLE_ENDIAN 1234
+#endif
+
+#ifdef __BIG_ENDIAN
+# undef __BIG_ENDIAN
+# define __BIG_ENDIAN 4321
+#endif
+
+#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD)
+# define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD)
+# define __BIG_ENDIAN_BITFIELD
+#endif
+
+#ifdef __LITTLE_ENDIAN
+# define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+# define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+# define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+# define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+# define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+# define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+
+# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+# define __cpu_to_be16(x) ((__force __be16)__swab16(x))
+# define __cpu_to_be32(x) ((__force __be32)__swab32(x))
+# define __cpu_to_be64(x) ((__force __be64)__swab64(x))
+#endif
+
+#ifdef __BIG_ENDIAN
+# define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+# define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+# define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+# define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+# define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+# define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+
+# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+# define __cpu_to_le16(x) ((__force __le16)__swab16(x))
+# define __cpu_to_le32(x) ((__force __le32)__swab32(x))
+# define __cpu_to_le64(x) ((__force __le64)__swab64(x))
+#endif
+
+/*
+ * These helpers could be phased out over time as the base version
+ * handles constant folding.
+ */
+#define __constant_htonl(x) __cpu_to_be32(x)
+#define __constant_ntohl(x) __be32_to_cpu(x)
+#define __constant_htons(x) __cpu_to_be16(x)
+#define __constant_ntohs(x) __be16_to_cpu(x)
+
+#define __constant_le16_to_cpu(x) __le16_to_cpu(x)
+#define __constant_le32_to_cpu(x) __le32_to_cpu(x)
+#define __constant_le64_to_cpu(x) __le64_to_cpu(x)
+#define __constant_be16_to_cpu(x) __be16_to_cpu(x)
+#define __constant_be32_to_cpu(x) __be32_to_cpu(x)
+#define __constant_be64_to_cpu(x) __be64_to_cpu(x)
+
+#define __constant_cpu_to_le16(x) __cpu_to_le16(x)
+#define __constant_cpu_to_le32(x) __cpu_to_le32(x)
+#define __constant_cpu_to_le64(x) __cpu_to_le64(x)
+#define __constant_cpu_to_be16(x) __cpu_to_be16(x)
+#define __constant_cpu_to_be32(x) __cpu_to_be32(x)
+#define __constant_cpu_to_be64(x) __cpu_to_be64(x)
+
+static inline void __le16_to_cpus(__u16 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab16s(p);
+#endif
+}
+
+static inline void __cpu_to_le16s(__u16 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab16s(p);
+#endif
+}
+
+static inline void __le32_to_cpus(__u32 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab32s(p);
+#endif
+}
+
+static inline void __cpu_to_le32s(__u32 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab32s(p);
+#endif
+}
+
+static inline void __le64_to_cpus(__u64 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab64s(p);
+#endif
+}
+
+static inline void __cpu_to_le64s(__u64 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab64s(p);
+#endif
+}
+
+static inline void __be16_to_cpus(__u16 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab16s(p);
+#endif
+}
+
+static inline void __cpu_to_be16s(__u16 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab16s(p);
+#endif
+}
+
+static inline void __be32_to_cpus(__u32 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab32s(p);
+#endif
+}
+
+static inline void __cpu_to_be32s(__u32 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab32s(p);
+#endif
+}
+
+static inline void __be64_to_cpus(__u64 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab64s(p);
+#endif
+}
+
+static inline void __cpu_to_be64s(__u64 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab64s(p);
+#endif
+}
+
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __u16)*p;
+#else
+ return __swab16p((__force __u16 *)p);
+#endif
+}
+
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __u32)*p;
+#else
+ return __swab32p((__force __u32 *)p);
+#endif
+}
+
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __u64)*p;
+#else
+ return __swab64p((__force __u64 *)p);
+#endif
+}
+
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __le16)*p;
+#else
+ return (__force __le16)__swab16p(p);
+#endif
+}
+
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __le32)*p;
+#else
+ return (__force __le32)__swab32p(p);
+#endif
+}
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __le64)*p;
+#else
+ return (__force __le64)__swab64p(p);
+#endif
+}
+
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __u16)*p;
+#else
+ return __swab16p((__force __u16 *)p);
+#endif
+}
+
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __u32)*p;
+#else
+ return __swab32p((__force __u32 *)p);
+#endif
+}
+
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __u64)*p;
+#else
+ return __swab64p((__force __u64 *)p);
+#endif
+}
+
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be16)*p;
+#else
+ return (__force __be16)__swab16p(p);
+#endif
+}
+
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)*p;
+#else
+ return (__force __be32)__swab32p(p);
+#endif
+}
+
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)*p;
+#else
+ return (__force __be64)__swab64p(p);
+#endif
+}
+
+#ifdef __KERNEL__
+
+# define le16_to_cpu __le16_to_cpu
+# define le32_to_cpu __le32_to_cpu
+# define le64_to_cpu __le64_to_cpu
+# define be16_to_cpu __be16_to_cpu
+# define be32_to_cpu __be32_to_cpu
+# define be64_to_cpu __be64_to_cpu
+# define cpu_to_le16 __cpu_to_le16
+# define cpu_to_le32 __cpu_to_le32
+# define cpu_to_le64 __cpu_to_le64
+# define cpu_to_be16 __cpu_to_be16
+# define cpu_to_be32 __cpu_to_be32
+# define cpu_to_be64 __cpu_to_be64
+
+# define le16_to_cpup __le16_to_cpup
+# define le32_to_cpup __le32_to_cpup
+# define le64_to_cpup __le64_to_cpup
+# define be16_to_cpup __be16_to_cpup
+# define be32_to_cpup __be32_to_cpup
+# define be64_to_cpup __be64_to_cpup
+# define cpu_to_le16p __cpu_to_le16p
+# define cpu_to_le32p __cpu_to_le32p
+# define cpu_to_le64p __cpu_to_le64p
+# define cpu_to_be16p __cpu_to_be16p
+# define cpu_to_be32p __cpu_to_be32p
+# define cpu_to_be64p __cpu_to_be64p
+
+# define le16_to_cpus __le16_to_cpus
+# define le32_to_cpus __le32_to_cpus
+# define le64_to_cpus __le64_to_cpus
+# define be16_to_cpus __be16_to_cpus
+# define be32_to_cpus __be32_to_cpus
+# define be64_to_cpus __be64_to_cpus
+# define cpu_to_le16s __cpu_to_le16s
+# define cpu_to_le32s __cpu_to_le32s
+# define cpu_to_le64s __cpu_to_le64s
+# define cpu_to_be16s __cpu_to_be16s
+# define cpu_to_be32s __cpu_to_be32s
+# define cpu_to_be64s __cpu_to_be64s
+
+/*
+ * They have to be macros in order to do the constant folding
+ * correctly - if the argument is passed into an inline function
+ * it is no longer constant according to gcc.
+ */
+# undef ntohl
+# undef ntohs
+# undef htonl
+# undef htons
+
+# define ___htonl(x) __cpu_to_be32(x)
+# define ___htons(x) __cpu_to_be16(x)
+# define ___ntohl(x) __be32_to_cpu(x)
+# define ___ntohs(x) __be16_to_cpu(x)
+
+# define htonl(x) ___htonl(x)
+# define ntohl(x) ___ntohl(x)
+# define htons(x) ___htons(x)
+# define ntohs(x) ___ntohs(x)
+
+static inline void le16_add_cpu(__le16 *var, u16 val)
+{
+ *var = cpu_to_le16(le16_to_cpup(var) + val);
+}
+
+static inline void le32_add_cpu(__le32 *var, u32 val)
+{
+ *var = cpu_to_le32(le32_to_cpup(var) + val);
+}
+
+static inline void le64_add_cpu(__le64 *var, u64 val)
+{
+ *var = cpu_to_le64(le64_to_cpup(var) + val);
+}
+
+static inline void be16_add_cpu(__be16 *var, u16 val)
+{
+ *var = cpu_to_be16(be16_to_cpup(var) + val);
+}
+
+static inline void be32_add_cpu(__be32 *var, u32 val)
+{
+ *var = cpu_to_be32(be32_to_cpup(var) + val);
+}
+
+static inline void be64_add_cpu(__be64 *var, u64 val)
+{
+ *var = cpu_to_be64(be64_to_cpup(var) + val);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_BYTEORDER_H */
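
A minimal sketch of the round-trip and in-place helpers collected in the new header (normally pulled in via asm/byteorder.h); the field name is illustrative.

#include <linux/byteorder.h>

static void example_on_wire_counter(void)
{
	u32 host = 0x12345678;
	__le32 wire;			/* little-endian on-disk/on-wire field */

	wire = cpu_to_le32(host);	/* host -> LE (identity on little-endian CPUs) */
	host = le32_to_cpu(wire);	/* LE -> host */

	le32_add_cpu(&wire, 1);		/* increment the LE field in place */
}
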
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 02673846d205..9d1fe30b6f6c 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -503,8 +503,19 @@ extern const kernel_cap_t __cap_init_eff_set;
kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);
-int capable(int cap);
-int __capable(struct task_struct *t, int cap);
+/**
+ * has_capability - Determine if a task has a superior capability available
+ * @t: The task in question
+ * @cap: The capability to be tested for
+ *
+ * Return true if the specified task has the given superior capability
+ * currently in effect, false if not.
+ *
+ * Note that this does not set PF_SUPERPRIV on the task.
+ */
+#define has_capability(t, cap) (security_capable((t), (cap)) == 0)
+
+extern int capable(int cap);
#endif /* __KERNEL__ */
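
A brief sketch of the new has_capability() helper, which queries another task's effective set via security_capable() without setting PF_SUPERPRIV; the wrapper name is illustrative.

#include <linux/capability.h>
#include <linux/sched.h>

static int example_may_inspect(struct task_struct *target)
{
	/* true if 'target' currently has CAP_SYS_PTRACE in effect */
	return has_capability(target, CAP_SYS_PTRACE);
}
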
diff --git a/include/linux/completion.h b/include/linux/completion.h
index d2961b66d53d..02ef8835999c 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -49,10 +49,13 @@ extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_interruptible_timeout(
struct completion *x, unsigned long timeout);
+extern bool try_wait_for_completion(struct completion *x);
+extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
#define INIT_COMPLETION(x) ((x).done = 0)
+
#endif
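
A small sketch contrasting the two new completion helpers: try_wait_for_completion() consumes a pending completion without blocking, while completion_done() only peeks. Names are illustrative.

#include <linux/completion.h>

static int example_poll(struct completion *work_done)
{
	if (try_wait_for_completion(work_done))
		return 1;	/* consumed one completion, no sleep */

	/* advisory: true if complete() has run, but nothing is consumed */
	return completion_done(work_done);
}
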
diff --git a/include/linux/cred.h b/include/linux/cred.h
new file mode 100644
index 000000000000..b69222cc1fd2
--- /dev/null
+++ b/include/linux/cred.h
@@ -0,0 +1,50 @@
+/* Credentials management
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_CRED_H
+#define _LINUX_CRED_H
+
+#define get_current_user() (get_uid(current->user))
+
+#define task_uid(task) ((task)->uid)
+#define task_gid(task) ((task)->gid)
+#define task_euid(task) ((task)->euid)
+#define task_egid(task) ((task)->egid)
+
+#define current_uid() (current->uid)
+#define current_gid() (current->gid)
+#define current_euid() (current->euid)
+#define current_egid() (current->egid)
+#define current_suid() (current->suid)
+#define current_sgid() (current->sgid)
+#define current_fsuid() (current->fsuid)
+#define current_fsgid() (current->fsgid)
+#define current_cap() (current->cap_effective)
+
+#define current_uid_gid(_uid, _gid) \
+do { \
+ *(_uid) = current->uid; \
+ *(_gid) = current->gid; \
+} while(0)
+
+#define current_euid_egid(_uid, _gid) \
+do { \
+ *(_uid) = current->euid; \
+ *(_gid) = current->egid; \
+} while(0)
+
+#define current_fsuid_fsgid(_uid, _gid) \
+do { \
+ *(_uid) = current->fsuid; \
+ *(_gid) = current->fsgid; \
+} while(0)
+
+#endif /* _LINUX_CRED_H */
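
A short sketch of the accessor style these macros establish (compare the XFS hunk at the top of this range, where current_fsuid(credp) becomes current_fsuid()): callers read the current task's credentials without threading a cred pointer through. The helper is illustrative.

#include <linux/cred.h>
#include <linux/sched.h>

static void example_stamp_new_inode(uid_t *uid, gid_t *gid)
{
	/* filesystem-access ids of the calling task */
	*uid = current_fsuid();
	*gid = current_fsgid();
}
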
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
index acbdbcc16051..6e199c8dfacc 100644
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -24,34 +24,8 @@
*/
#ifdef CONFIG_FIRMWARE_MEMMAP
-/**
- * Adds a firmware mapping entry. This function uses kmalloc() for memory
- * allocation. Use firmware_map_add_early() if you want to use the bootmem
- * allocator.
- *
- * That function must be called before late_initcall.
- *
- * @start: Start of the memory range.
- * @end: End of the memory range (inclusive).
- * @type: Type of the memory range.
- *
- * Returns 0 on success, or -ENOMEM if no memory could be allocated.
- */
int firmware_map_add(resource_size_t start, resource_size_t end,
const char *type);
-
-/**
- * Adds a firmware mapping entry. This function uses the bootmem allocator
- * for memory allocation. Use firmware_map_add() if you want to use kmalloc().
- *
- * That function must be called before late_initcall.
- *
- * @start: Start of the memory range.
- * @end: End of the memory range (inclusive).
- * @type: Type of the memory range.
- *
- * Returns 0 on success, or -ENOMEM if no memory could be allocated.
- */
int firmware_map_add_early(resource_size_t start, resource_size_t end,
const char *type);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f368d041e02d..bb384068272e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -98,6 +98,27 @@ static inline void tracer_disable(void)
#endif
}
+/* Ftrace disable/restore without lock. Some synchronization mechanism
+ * must be used to prevent ftrace_enabled from being changed between
+ * disable/restore. */
+static inline int __ftrace_enabled_save(void)
+{
+#ifdef CONFIG_FTRACE
+ int saved_ftrace_enabled = ftrace_enabled;
+ ftrace_enabled = 0;
+ return saved_ftrace_enabled;
+#else
+ return 0;
+#endif
+}
+
+static inline void __ftrace_enabled_restore(int enabled)
+{
+#ifdef CONFIG_FTRACE
+ ftrace_enabled = enabled;
+#endif
+}
+
#ifdef CONFIG_FRAME_POINTER
/* TODO: need to fix this for ARM */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
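
A minimal sketch of the save/restore pair above; as the comment notes, the caller must provide its own serialization against other writers of ftrace_enabled.

#include <linux/ftrace.h>

static void example_traceless_region(void)
{
	int saved = __ftrace_enabled_save();	/* returns 0 when CONFIG_FTRACE is off */

	/* ... code the function tracer must not instrument ... */

	__ftrace_enabled_restore(saved);
}
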
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 4862398e05bf..bf34c5f4c051 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -39,7 +39,6 @@
#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
#define I2C_DRIVERID_SAA7110 22 /* video decoder */
-#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */
#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
#define I2C_DRIVERID_PCF8583 25 /* real time clock */
#define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */
@@ -95,7 +94,6 @@
#define I2C_HW_B_BT848 0x010005 /* BT848 video boards */
#define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */
#define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */
-#define I2C_HW_B_G400 0x010009 /* Matrox G400 */
#define I2C_HW_B_I810 0x01000a /* Intel I810 */
#define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */
#define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */
diff --git a/include/linux/init.h b/include/linux/init.h
index 11b84e106053..93538b696e3d 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -139,6 +139,7 @@ extern initcall_t __con_initcall_start[], __con_initcall_end[];
extern initcall_t __security_initcall_start[], __security_initcall_end[];
/* Defined in init/main.c */
+extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
extern char *saved_command_line;
extern unsigned int reset_devices;
diff --git a/include/linux/ivtv.h b/include/linux/ivtv.h
index 794b8daa9378..17ca64b5a66c 100644
--- a/include/linux/ivtv.h
+++ b/include/linux/ivtv.h
@@ -21,11 +21,7 @@
#ifndef __LINUX_IVTV_H__
#define __LINUX_IVTV_H__
-#ifdef __KERNEL__
-#include <linux/compiler.h> /* need __user */
-#else
-#define __user
-#endif
+#include <linux/compiler.h>
#include <linux/types.h>
/* ivtv knows several distinct output modes: MPEG streaming,
diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h
index e980ba62ddcc..e20af47b59ad 100644
--- a/include/linux/ivtvfb.h
+++ b/include/linux/ivtvfb.h
@@ -21,11 +21,7 @@
#ifndef __LINUX_IVTVFB_H__
#define __LINUX_IVTVFB_H__
-#ifdef __KERNEL__
-#include <linux/compiler.h> /* need __user */
-#else
-#define __user
-#endif
+#include <linux/compiler.h>
#include <linux/types.h>
/* Framebuffer external API */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index aaa998f65c7a..2651f805ba6d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -108,6 +108,13 @@ struct completion;
struct pt_regs;
struct user;
+#ifdef CONFIG_PREEMPT_VOLUNTARY
+extern int _cond_resched(void);
+# define might_resched() _cond_resched()
+#else
+# define might_resched() do { } while (0)
+#endif
+
/**
* might_sleep - annotation for functions that can sleep
*
@@ -118,13 +125,6 @@ struct user;
* be bitten later when the calling function happens to sleep when it is not
* supposed to.
*/
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int _cond_resched(void);
-# define might_resched() _cond_resched()
-#else
-# define might_resched() do { } while (0)
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line);
# define might_sleep() \
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 32110cede64f..17f76fc05173 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -25,8 +25,8 @@
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
#endif
-#ifndef KEXEC_CONTROL_CODE_SIZE
-#error KEXEC_CONTROL_CODE_SIZE not defined
+#ifndef KEXEC_CONTROL_PAGE_SIZE
+#error KEXEC_CONTROL_PAGE_SIZE not defined
#endif
#ifndef KEXEC_ARCH
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2486eb4edbf1..331e5f1c2d8e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -89,6 +89,7 @@ struct lock_class {
struct lockdep_subclass_key *key;
unsigned int subclass;
+ unsigned int dep_gen_id;
/*
* IRQ/softirq usage tracking bits:
@@ -189,6 +190,14 @@ struct lock_chain {
u64 chain_key;
};
+#define MAX_LOCKDEP_KEYS_BITS 13
+/*
+ * Subtract one because we offset hlock->class_idx by 1 in order
+ * to make 0 mean no class. This avoids overflowing the class_idx
+ * bitfield and hitting the BUG in hlock_class().
+ */
+#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+
struct held_lock {
/*
* One-way hash of the dependency chain up to this point. We
@@ -205,14 +214,14 @@ struct held_lock {
* with zero), here we store the previous hash value:
*/
u64 prev_chain_key;
- struct lock_class *class;
unsigned long acquire_ip;
struct lockdep_map *instance;
-
+ struct lockdep_map *nest_lock;
#ifdef CONFIG_LOCK_STAT
u64 waittime_stamp;
u64 holdtime_stamp;
#endif
+ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
/*
* The lock-stack is unified in that the lock chains of interrupt
* contexts nest ontop of process context chains, but we 'separate'
@@ -226,11 +235,11 @@ struct held_lock {
* The following field is used to detect when we cross into an
* interrupt context:
*/
- int irq_context;
- int trylock;
- int read;
- int check;
- int hardirqs_off;
+ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+ unsigned int trylock:1;
+ unsigned int read:2; /* see lock_acquire() comment */
+ unsigned int check:2; /* see lock_acquire() comment */
+ unsigned int hardirqs_off:1;
};
/*
@@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
* 2: full validation
*/
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check, unsigned long ip);
+ int trylock, int read, int check,
+ struct lockdep_map *nest_lock, unsigned long ip);
extern void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
+extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+ unsigned long ip);
+
# define INIT_LOCKDEP .lockdep_recursion = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -313,8 +326,9 @@ static inline void lockdep_on(void)
{
}
-# define lock_acquire(l, s, t, r, c, i) do { } while (0)
+# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
+# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
@@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
-# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
+# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# else
-# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
+# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i) lock_release(l, n, i)
#else
@@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
-# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
-# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i)
# else
-# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
-# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i)
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i) lock_release(l, n, i)
#else
@@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
-# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# else
-# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i) lock_release(l, n, i)
#else
@@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
-# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
-# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i)
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
-# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
-# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i)
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
@@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define rwsem_release(l, n, i) do { } while (0)
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+# else
+# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+# endif
+# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+#else
+# define lock_map_acquire(l) do { } while (0)
+# define lock_map_release(l) do { } while (0)
+#endif
+
#endif /* __LINUX_LOCKDEP_H */
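
A minimal sketch of the new lock_map_acquire()/lock_map_release() pair: a pseudo-lock map is held around a callback so lockdep records cross-context dependencies (the pattern workqueues use). Assumes a lockdep-enabled kernel; names are illustrative.

#include <linux/lockdep.h>

static struct lock_class_key example_key;
static struct lockdep_map example_map;

static void example_setup(void)
{
	lockdep_init_map(&example_map, "example_work", &example_key, 0);
}

static void example_run(void (*fn)(void))
{
	lock_map_acquire(&example_map);		/* lockdep: "holding" the pseudo-lock */
	fn();					/* locks taken in fn() nest under it */
	lock_map_release(&example_map);
}
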
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 335288bff1b7..72a15dc26bbf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -73,7 +73,7 @@ extern unsigned int kobjsize(const void *objp);
#endif
/*
- * vm_flags..
+ * vm_flags in vm_area_struct, see mm_types.h.
*/
#define VM_READ 0x00000001 /* currently active flags */
#define VM_WRITE 0x00000002
@@ -834,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
-#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
/*
* get_user_pages_fast provides equivalent functionality to get_user_pages,
* operating on current and current->mm (force=0 and doesn't return any vmas).
@@ -848,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
-#else
-/*
- * Should probably be moved to asm-generic, and architectures can include it if
- * they don't implement their own get_user_pages_fast.
- */
-#define get_user_pages_fast(start, nr_pages, write, pages) \
-({ \
- struct mm_struct *mm = current->mm; \
- int ret; \
- \
- down_read(&mm->mmap_sem); \
- ret = get_user_pages(current, mm, start, nr_pages, \
- write, 0, pages, NULL); \
- up_read(&mm->mmap_sem); \
- \
- ret; \
-})
-#endif
-
/*
* A callback you can register to apply pressure to ageable caches.
*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 386edbe2cb4e..bf334138c7c1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -113,7 +113,7 @@ struct vm_area_struct {
struct vm_area_struct *vm_next;
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
- unsigned long vm_flags; /* Flags, listed below. */
+ unsigned long vm_flags; /* Flags, see mm.h. */
struct rb_node vm_rb;
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 8c774905dcfe..4ab843622727 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire() \
- lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+ lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire() do { } while (0)
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index b93b541cf111..988e55fe649b 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -59,6 +59,7 @@ extern void machine_crash_shutdown(struct pt_regs *);
 * Architecture-independent implementations of sys_reboot commands.
*/
+extern void kernel_restart_prepare(char *cmd);
extern void kernel_restart(char *cmd);
extern void kernel_halt(void);
extern void kernel_power_off(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5270d449ff9d..cfb0d87b99fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -87,6 +87,7 @@ struct sched_param {
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>
#include <linux/latencytop.h>
+#include <linux/cred.h>
#include <asm/processor.h>
@@ -1551,16 +1552,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
extern unsigned long long sched_clock(void);
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
- return sched_clock();
-}
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}
@@ -1572,28 +1567,11 @@ static inline void sched_clock_idle_sleep_event(void)
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
-
-#ifdef CONFIG_NO_HZ
-static inline void sched_clock_tick_stop(int cpu)
-{
-}
-
-static inline void sched_clock_tick_start(int cpu)
-{
-}
-#endif
-
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
+#else
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-#ifdef CONFIG_NO_HZ
-extern void sched_clock_tick_stop(int cpu);
-extern void sched_clock_tick_start(int cpu);
#endif
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
diff --git a/include/linux/security.h b/include/linux/security.h
index fd96e7f8a6f9..80c4d002864c 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -46,8 +46,8 @@ struct audit_krule;
*/
extern int cap_capable(struct task_struct *tsk, int cap);
extern int cap_settime(struct timespec *ts, struct timezone *tz);
-extern int cap_ptrace(struct task_struct *parent, struct task_struct *child,
- unsigned int mode);
+extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode);
+extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -1157,17 +1157,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @alter contains the flag indicating whether changes are to be made.
* Return 0 if permission is granted.
*
- * @ptrace:
- * Check permission before allowing the @parent process to trace the
+ * @ptrace_may_access:
+ * Check permission before allowing the current process to trace the
* @child process.
* Security modules may also want to perform a process tracing check
* during an execve in the set_security or apply_creds hooks of
* binprm_security_ops if the process is being traced and its security
* attributes would be changed by the execve.
- * @parent contains the task_struct structure for parent process.
- * @child contains the task_struct structure for child process.
+ * @child contains the task_struct structure for the target process.
* @mode contains the PTRACE_MODE flags indicating the form of access.
* Return 0 if permission is granted.
+ * @ptrace_traceme:
+ * Check that the @parent process has sufficient permission to trace the
+ * current process before allowing the current process to present itself
+ * to the @parent process for tracing.
+ * The parent process will still have to undergo the ptrace_may_access
+ * checks before it is allowed to trace this one.
+ *	@parent contains the task_struct structure for the debugger process.
+ * Return 0 if permission is granted.
* @capget:
* Get the @effective, @inheritable, and @permitted capability sets for
* the @target process. The hook may also perform permission checking to
@@ -1287,8 +1294,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
struct security_operations {
char name[SECURITY_NAME_MAX + 1];
- int (*ptrace) (struct task_struct *parent, struct task_struct *child,
- unsigned int mode);
+ int (*ptrace_may_access) (struct task_struct *child, unsigned int mode);
+ int (*ptrace_traceme) (struct task_struct *parent);
int (*capget) (struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -1560,8 +1567,8 @@ extern struct dentry *securityfs_create_dir(const char *name, struct dentry *par
extern void securityfs_remove(struct dentry *dentry);
/* Security operations */
-int security_ptrace(struct task_struct *parent, struct task_struct *child,
- unsigned int mode);
+int security_ptrace_may_access(struct task_struct *child, unsigned int mode);
+int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
@@ -1742,11 +1749,15 @@ static inline int security_init(void)
return 0;
}
-static inline int security_ptrace(struct task_struct *parent,
- struct task_struct *child,
- unsigned int mode)
+static inline int security_ptrace_may_access(struct task_struct *child,
+ unsigned int mode)
+{
+ return cap_ptrace_may_access(child, mode);
+}
+
+static inline int security_ptrace_traceme(struct task_struct *parent)
{
- return cap_ptrace(parent, child, mode);
+ return cap_ptrace_traceme(parent);
}
static inline int security_capget(struct task_struct *target,
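Illustrative sketch only, assuming a module that simply defers to the capability defaults: with the old ptrace hook split in two, a security module now supplies both entries. The names below are invented.

#include <linux/security.h>

static int example_ptrace_may_access(struct task_struct *child,
				     unsigned int mode)
{
	/* Defer to the commoncap checks declared above. */
	return cap_ptrace_may_access(child, mode);
}

static int example_ptrace_traceme(struct task_struct *parent)
{
	return cap_ptrace_traceme(parent);
}

static struct security_operations example_security_ops = {
	.name			= "example",
	.ptrace_may_access	= example_ptrace_may_access,
	.ptrace_traceme		= example_ptrace_traceme,
	/* remaining hooks left unset / defaulted */
};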
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index a66304a09955..a1783b229ef4 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mutex.h>
+#include <linux/cpumask.h>
+#include <linux/nodemask.h>
struct seq_operations;
struct file;
@@ -47,6 +49,16 @@ int seq_path(struct seq_file *, struct path *, char *);
int seq_dentry(struct seq_file *, struct dentry *, char *);
int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
char *esc);
+int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits);
+static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask)
+{
+ return seq_bitmap(m, mask->bits, NR_CPUS);
+}
+
+static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
+{
+ return seq_bitmap(m, mask->bits, MAX_NUMNODES);
+}
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_release(struct inode *, struct file *);
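A short usage sketch (not from the patch) of the new seq_cpumask() helper in a single_open()-style show routine; this mirrors the /proc/irq conversion later in this series.

#include <linux/fs.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>

static int example_mask_show(struct seq_file *m, void *v)
{
	cpumask_t *mask = m->private;	/* handed in via single_open() */

	seq_cpumask(m, mask);		/* prints the bitmap in the usual hex form */
	seq_putc(m, '\n');
	return 0;
}

static int example_mask_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_mask_show, &cpu_online_map);
}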
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cfcc45b3bef0..358661c9990e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -901,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
- !__pskb_pull_tail(skb, len-skb_headlen(skb)))
+ !__pskb_pull_tail(skb, len - skb_headlen(skb)))
return NULL;
skb->len -= len;
return skb->data += len;
@@ -918,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
return 1;
if (unlikely(len > skb->len))
return 0;
- return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
+ return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
/**
@@ -1321,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
unsigned int size = skb->len;
if (likely(size >= len))
return 0;
- return skb_pad(skb, len-size);
+ return skb_pad(skb, len - size);
}
static inline int skb_add_data(struct sk_buff *skb,
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 61e5610ad165..e0c0fccced46 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -183,8 +183,14 @@ do { \
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+ _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
#endif
#define write_lock(lock) _write_lock(lock)
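A minimal sketch, with made-up structures, of what the new annotation expresses: many inner locks of one class may be taken while a single outer lock that serializes them is held, so lockdep is told about that outer lock instead of giving each inner lock its own subclass.

#include <linux/spinlock.h>
#include <linux/list.h>

struct outer {
	spinlock_t	lock;		/* serializes all item locks below */
	struct list_head items;
};

struct item {
	spinlock_t	lock;
	struct list_head node;
};

static void example_lock_all(struct outer *o)
{
	struct item *it;

	spin_lock(&o->lock);
	list_for_each_entry(it, &o->items, node)
		/* nesting is safe because o->lock is held; tell lockdep so */
		spin_lock_nest_lock(&it->lock, &o->lock);
	/* matching unlock loop omitted in this sketch */
}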
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8a2307ce7296..d79845d034b5 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
__acquires(lock);
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+ __acquires(lock);
void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index c63435095970..2ce8207686e2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -217,11 +217,11 @@ struct platform_hibernation_ops {
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
-static inline void register_nosave_region(unsigned long b, unsigned long e)
+static inline void __init register_nosave_region(unsigned long b, unsigned long e)
{
__register_nosave_region(b, e, 0);
}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e)
+static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
{
__register_nosave_region(b, e, 1);
}
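Illustrative only, with made-up addresses: the wrappers are now __init, so callers must also run at init time; the arguments are page frame numbers, hence the PFN_* conversions.

#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/suspend.h>

static int __init example_mark_nosave(void)
{
	/* Tell hibernation to skip a hypothetical firmware-owned window. */
	register_nosave_region(PFN_DOWN(0x000a0000), PFN_UP(0x00100000));
	return 0;
}
core_initcall(example_mark_nosave);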
diff --git a/include/linux/swab.h b/include/linux/swab.h
new file mode 100644
index 000000000000..270d5c208a89
--- /dev/null
+++ b/include/linux/swab.h
@@ -0,0 +1,309 @@
+#ifndef _LINUX_SWAB_H
+#define _LINUX_SWAB_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+
+/*
+ * Casts are necessary for constants, because we never know for sure
+ * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define __const_swab16(x) ((__u16)( \
+ (((__u16)(x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(x) & (__u16)0xff00U) >> 8)))
+
+#define __const_swab32(x) ((__u32)( \
+ (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(x) & (__u32)0xff000000UL) >> 24)))
+
+#define __const_swab64(x) ((__u64)( \
+ (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
+
+#define __const_swahw32(x) ((__u32)( \
+ (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \
+ (((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
+
+#define __const_swahb32(x) ((__u32)( \
+ (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \
+ (((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
+
+/*
+ * Implement the following as inlines, but define the interface using
+ * macros to allow constant folding when possible:
+ * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
+ */
+
+static inline __attribute_const__ __u16 ___swab16(__u16 val)
+{
+#ifdef __arch_swab16
+ return __arch_swab16(val);
+#elif defined(__arch_swab16p)
+ return __arch_swab16p(&val);
+#else
+ return __const_swab16(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swab32(__u32 val)
+{
+#ifdef __arch_swab32
+ return __arch_swab32(val);
+#elif defined(__arch_swab32p)
+ return __arch_swab32p(&val);
+#else
+ return __const_swab32(val);
+#endif
+}
+
+static inline __attribute_const__ __u64 ___swab64(__u64 val)
+{
+#ifdef __arch_swab64
+ return __arch_swab64(val);
+#elif defined(__arch_swab64p)
+ return __arch_swab64p(&val);
+#elif defined(__SWAB_64_THRU_32__)
+ __u32 h = val >> 32;
+ __u32 l = val & ((1ULL << 32) - 1);
+ return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h)));
+#else
+ return __const_swab64(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swahw32(__u32 val)
+{
+#ifdef __arch_swahw32
+ return __arch_swahw32(val);
+#elif defined(__arch_swahw32p)
+ return __arch_swahw32p(&val);
+#else
+ return __const_swahw32(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swahb32(__u32 val)
+{
+#ifdef __arch_swahb32
+ return __arch_swahb32(val);
+#elif defined(__arch_swahb32p)
+ return __arch_swahb32p(&val);
+#else
+ return __const_swahb32(val);
+#endif
+}
+
+/**
+ * __swab16 - return a byteswapped 16-bit value
+ * @x: value to byteswap
+ */
+#define __swab16(x) \
+ (__builtin_constant_p((__u16)(x)) ? \
+ __const_swab16((x)) : \
+ ___swab16((x)))
+
+/**
+ * __swab32 - return a byteswapped 32-bit value
+ * @x: value to byteswap
+ */
+#define __swab32(x) \
+ (__builtin_constant_p((__u32)(x)) ? \
+ __const_swab32((x)) : \
+ ___swab32((x)))
+
+/**
+ * __swab64 - return a byteswapped 64-bit value
+ * @x: value to byteswap
+ */
+#define __swab64(x) \
+ (__builtin_constant_p((__u64)(x)) ? \
+ __const_swab64((x)) : \
+ ___swab64((x)))
+
+/**
+ * __swahw32 - return a word-swapped 32-bit value
+ * @x: value to wordswap
+ *
+ * __swahw32(0x12340000) is 0x00001234
+ */
+#define __swahw32(x) \
+ (__builtin_constant_p((__u32)(x)) ? \
+ __const_swahw32((x)) : \
+ ___swahw32((x)))
+
+/**
+ * __swahb32 - return a high and low byte-swapped 32-bit value
+ * @x: value to byteswap
+ *
+ * __swahb32(0x12345678) is 0x34127856
+ */
+#define __swahb32(x) \
+ (__builtin_constant_p((__u32)(x)) ? \
+ __const_swahb32((x)) : \
+ ___swahb32((x)))
+
+/**
+ * __swab16p - return a byteswapped 16-bit value from a pointer
+ * @p: pointer to a naturally-aligned 16-bit value
+ */
+static inline __u16 __swab16p(const __u16 *p)
+{
+#ifdef __arch_swab16p
+ return __arch_swab16p(p);
+#else
+ return __swab16(*p);
+#endif
+}
+
+/**
+ * __swab32p - return a byteswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ */
+static inline __u32 __swab32p(const __u32 *p)
+{
+#ifdef __arch_swab32p
+ return __arch_swab32p(p);
+#else
+ return __swab32(*p);
+#endif
+}
+
+/**
+ * __swab64p - return a byteswapped 64-bit value from a pointer
+ * @p: pointer to a naturally-aligned 64-bit value
+ */
+static inline __u64 __swab64p(const __u64 *p)
+{
+#ifdef __arch_swab64p
+ return __arch_swab64p(p);
+#else
+ return __swab64(*p);
+#endif
+}
+
+/**
+ * __swahw32p - return a wordswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahw32() for details of wordswapping.
+ */
+static inline __u32 __swahw32p(const __u32 *p)
+{
+#ifdef __arch_swahw32p
+ return __arch_swahw32p(p);
+#else
+ return __swahw32(*p);
+#endif
+}
+
+/**
+ * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahb32() for details of high/low byteswapping.
+ */
+static inline __u32 __swahb32p(const __u32 *p)
+{
+#ifdef __arch_swahb32p
+ return __arch_swahb32p(p);
+#else
+ return __swahb32(*p);
+#endif
+}
+
+/**
+ * __swab16s - byteswap a 16-bit value in-place
+ * @p: pointer to a naturally-aligned 16-bit value
+ */
+static inline void __swab16s(__u16 *p)
+{
+#ifdef __arch_swab16s
+ __arch_swab16s(p);
+#else
+ *p = __swab16p(p);
+#endif
+}
+/**
+ * __swab32s - byteswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ */
+static inline void __swab32s(__u32 *p)
+{
+#ifdef __arch_swab32s
+ __arch_swab32s(p);
+#else
+ *p = __swab32p(p);
+#endif
+}
+
+/**
+ * __swab64s - byteswap a 64-bit value in-place
+ * @p: pointer to a naturally-aligned 64-bit value
+ */
+static inline void __swab64s(__u64 *p)
+{
+#ifdef __arch_swab64s
+ __arch_swab64s(p);
+#else
+ *p = __swab64p(p);
+#endif
+}
+
+/**
+ * __swahw32s - wordswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahw32() for details of wordswapping
+ */
+static inline void __swahw32s(__u32 *p)
+{
+#ifdef __arch_swahw32s
+ __arch_swahw32s(p);
+#else
+ *p = __swahw32p(p);
+#endif
+}
+
+/**
+ * __swahb32s - high and low byteswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahb32() for details of high and low byte swapping
+ */
+static inline void __swahb32s(__u32 *p)
+{
+#ifdef __arch_swahb32s
+ __arch_swahb32s(p);
+#else
+ *p = __swahb32p(p);
+#endif
+}
+
+#ifdef __KERNEL__
+# define swab16 __swab16
+# define swab32 __swab32
+# define swab64 __swab64
+# define swahw32 __swahw32
+# define swahb32 __swahb32
+# define swab16p __swab16p
+# define swab32p __swab32p
+# define swab64p __swab64p
+# define swahw32p __swahw32p
+# define swahb32p __swahb32p
+# define swab16s __swab16s
+# define swab32s __swab32s
+# define swab64s __swab64s
+# define swahw32s __swahw32s
+# define swahb32s __swahb32s
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SWAB_H */
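A hedged demonstration of the interfaces defined above: constant arguments fold at compile time via __builtin_constant_p(), while variable ones go through the ___swab*() inlines (or an architecture override when one is defined).

#include <linux/kernel.h>
#include <linux/swab.h>

static void example_swab_usage(void)
{
	__u32 word = 0x12345678;
	__u16 buf[4] = { 0x0102, 0x0304, 0x0506, 0x0708 };
	int i;

	word = swab32(word);		/* 0x78563412 */

	/* In-place variant for buffers read from a foreign-endian device. */
	for (i = 0; i < ARRAY_SIZE(buf); i++)
		swab16s(&buf[i]);	/* 0x0201, 0x0403, ... */
}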
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e3579cb086e0..0cbec74ec086 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -331,6 +331,8 @@ extern int tty_write_room(struct tty_struct *tty);
extern void tty_driver_flush_buffer(struct tty_struct *tty);
extern void tty_throttle(struct tty_struct *tty);
extern void tty_unthrottle(struct tty_struct *tty);
+extern int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize *ws);
extern int is_current_pgrp_orphaned(void);
extern struct pid *tty_get_pgrp(struct tty_struct *tty);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index e1065ac0d922..16d27944c321 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -168,6 +168,18 @@
*
* Optional: If not provided then the write method is called under
* the atomic write lock to keep it serialized with the ldisc.
+ *
+ * int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty,
+ * unsigned int rows, unsigned int cols);
+ *
+ * Called when a termios request is issued which changes the
+ * requested terminal geometry.
+ *
+ * Optional: the default action is to update the termios structure
+ * without error. This is usually the correct behaviour. Drivers should
+ *	not force errors here if they are not resizable objects (e.g. a serial
+ * line). See tty_do_resize() if you need to wrap the standard method
+ * in your own logic - the usual case.
*/
#include <linux/fs.h>
@@ -206,6 +218,8 @@ struct tty_operations {
int (*tiocmget)(struct tty_struct *tty, struct file *file);
int (*tiocmset)(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
+ int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize *ws);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct tty_driver *driver, int line, char *options);
int (*poll_get_char)(struct tty_driver *driver, int line);
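Sketch of a driver wiring up the new operation; example_hw_set_geometry() is a placeholder for whatever the hardware actually needs, and the fallback to tty_do_resize() keeps the standard termios update.

#include <linux/tty.h>
#include <linux/tty_driver.h>

/* Placeholder for real hardware programming. */
static int example_hw_set_geometry(unsigned int cols, unsigned int rows)
{
	return 0;
}

static int example_resize(struct tty_struct *tty, struct tty_struct *real_tty,
			  struct winsize *ws)
{
	int ret;

	ret = example_hw_set_geometry(ws->ws_col, ws->ws_row);
	if (ret)
		return ret;

	/* Let the tty core record the new window size as usual. */
	return tty_do_resize(tty, real_tty, ws);
}

static const struct tty_operations example_tty_ops = {
	/* ... the usual open/close/write operations ... */
	.resize	= example_resize,
};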
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 5811c5da69f9..0924cd9c30f6 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -110,6 +110,8 @@ enum usb_interface_condition {
* @sysfs_files_created: sysfs attributes exist
* @needs_remote_wakeup: flag set when the driver requires remote-wakeup
* capability during autosuspend.
+ * @needs_binding: flag set when the driver should be re-probed or unbound
+ * following a reset or suspend operation it doesn't support.
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
new file mode 100644
index 000000000000..630962c04ca4
--- /dev/null
+++ b/include/linux/usb/musb.h
@@ -0,0 +1,98 @@
+/*
+ * This is used for host and peripheral modes of the driver for
+ * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC.
+ *
+ * Board initialization should put one of these into dev->platform_data,
+ * probably on some platform_device named "musb_hdrc". It encapsulates
+ * key configuration differences between boards.
+ */
+
+/* The USB role is defined by the connector used on the board, so long as
+ * standards are being followed. (Developer boards sometimes won't.)
+ */
+enum musb_mode {
+ MUSB_UNDEFINED = 0,
+ MUSB_HOST, /* A or Mini-A connector */
+ MUSB_PERIPHERAL, /* B or Mini-B connector */
+ MUSB_OTG /* Mini-AB connector */
+};
+
+struct clk;
+
+struct musb_hdrc_eps_bits {
+ const char name[16];
+ u8 bits;
+};
+
+struct musb_hdrc_config {
+ /* MUSB configuration-specific details */
+ unsigned multipoint:1; /* multipoint device */
+ unsigned dyn_fifo:1; /* supports dynamic fifo sizing */
+ unsigned soft_con:1; /* soft connect required */
+ unsigned utm_16:1; /* utm data width is 16 bits */
+ unsigned big_endian:1; /* true if CPU uses big-endian */
+ unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */
+ unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */
+ unsigned high_iso_tx:1; /* Tx ep required for HB iso */
+ unsigned high_iso_rx:1; /* Rx ep required for HB iso */
+ unsigned dma:1; /* supports DMA */
+ unsigned vendor_req:1; /* vendor registers required */
+
+ u8 num_eps; /* number of endpoints _with_ ep0 */
+ u8 dma_channels; /* number of dma channels */
+ u8 dyn_fifo_size; /* dynamic size in bytes */
+ u8 vendor_ctrl; /* vendor control reg width */
+ u8 vendor_stat; /* vendor status reg width */
+ u8 dma_req_chan; /* bitmask for required dma channels */
+ u8 ram_bits; /* ram address size */
+
+ struct musb_hdrc_eps_bits *eps_bits;
+};
+
+struct musb_hdrc_platform_data {
+ /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */
+ u8 mode;
+
+ /* for clk_get() */
+ const char *clock;
+
+ /* (HOST or OTG) switch VBUS on/off */
+ int (*set_vbus)(struct device *dev, int is_on);
+
+ /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */
+ u8 power;
+
+ /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */
+ u8 min_power;
+
+ /* (HOST or OTG) msec/2 after VBUS on till power good */
+ u8 potpgt;
+
+ /* Power the device on or off */
+ int (*set_power)(int state);
+
+ /* Turn device clock on or off */
+ int (*set_clock)(struct clk *clock, int is_on);
+
+ /* MUSB configuration-specific details */
+ struct musb_hdrc_config *config;
+};
+
+
+/* TUSB 6010 support */
+
+#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */
+#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */
+#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */
+
+#ifdef CONFIG_ARCH_OMAP2
+
+extern int __init tusb6010_setup_interface(
+ struct musb_hdrc_platform_data *data,
+ unsigned ps_refclk, unsigned waitpin,
+ unsigned async_cs, unsigned sync_cs,
+ unsigned irq, unsigned dmachan);
+
+extern int tusb6010_platform_retime(unsigned is_refclk);
+
+#endif /* OMAP2 */
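Board-code sketch with invented values: the platform data above ends up in dev->platform_data of a "musb_hdrc" platform device, roughly like this.

#include <linux/platform_device.h>
#include <linux/usb/musb.h>

static struct musb_hdrc_config example_musb_config = {
	.multipoint	= 1,
	.dyn_fifo	= 1,
	.num_eps	= 16,
	.ram_bits	= 12,
};

static struct musb_hdrc_platform_data example_musb_plat = {
	.mode		= MUSB_OTG,
	.clock		= "usbhs_clk",		/* assumed clk_get() name */
	.power		= 250,			/* 500 mA VBUS, in units of 2 mA */
	.config		= &example_musb_config,
};

static struct platform_device example_musb_device = {
	.name	= "musb_hdrc",
	.id	= -1,
	.dev	= {
		.platform_data	= &example_musb_plat,
	},
};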
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 09a3e6a7518f..655341d0f534 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -17,7 +17,8 @@
#include <linux/mutex.h>
#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
-#define SERIAL_TTY_MINORS 255 /* loads of devices :) */
+#define SERIAL_TTY_MINORS 254 /* loads of devices :) */
+#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */
/* The maximum number of ports one device can grab at once */
#define MAX_NUM_PORTS 8
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index e466bd54a50e..e65a6bed4e3e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -55,13 +55,13 @@
*/
#ifndef __LINUX_VIDEODEV2_H
#define __LINUX_VIDEODEV2_H
+
#ifdef __KERNEL__
#include <linux/time.h> /* need struct timeval */
-#include <linux/compiler.h> /* need __user */
#else
-#define __user
#include <sys/time.h>
#endif
+#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/types.h>
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 364789aae9f3..328eb4022727 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -4,9 +4,9 @@
#include <linux/spinlock.h>
#include <asm/page.h> /* pgprot_t */
-struct vm_area_struct;
+struct vm_area_struct; /* vma defining user mapping in mm_types.h */
-/* bits in vm_struct->flags */
+/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 1c78d56c57e5..1cbd0a7db4e6 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -35,7 +35,6 @@ extern int fg_console, last_console, want_console;
int vc_allocate(unsigned int console);
int vc_cons_allocated(unsigned int console);
int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines);
-int vc_lock_resize(struct vc_data *vc, unsigned int cols, unsigned int lines);
void vc_deallocate(unsigned int console);
void reset_palette(struct vc_data *vc);
void do_blank_screen(int entering_gfx);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2f8b3c06a101..bc391ba101e9 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -38,11 +38,6 @@ struct route_info {
#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-extern struct rt6_info *ip6_prohibit_entry;
-extern struct rt6_info *ip6_blk_hole_entry;
-#endif
-
extern void ip6_route_input(struct sk_buff *skb);
extern struct dst_entry * ip6_route_output(struct net *net,
@@ -118,7 +113,6 @@ extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
extern void rt6_ifdown(struct net *net, struct net_device *dev);
extern void rt6_mtu_change(struct net_device *dev, unsigned mtu);
-extern rwlock_t rt6_lock;
/*
* Store a destination cache entry in a socket
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index cbb59ebed4ae..7312c3dd309f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -140,8 +140,24 @@ struct ip_vs_seq {
/*
- * IPVS statistics object
+ * IPVS statistics objects
*/
+struct ip_vs_estimator {
+ struct list_head list;
+
+ u64 last_inbytes;
+ u64 last_outbytes;
+ u32 last_conns;
+ u32 last_inpkts;
+ u32 last_outpkts;
+
+ u32 cps;
+ u32 inpps;
+ u32 outpps;
+ u32 inbps;
+ u32 outbps;
+};
+
struct ip_vs_stats
{
__u32 conns; /* connections scheduled */
@@ -156,7 +172,15 @@ struct ip_vs_stats
__u32 inbps; /* current in byte rate */
__u32 outbps; /* current out byte rate */
+ /*
+ * Don't add anything before the lock, because we use memcpy() to copy
+ * the members before the lock to struct ip_vs_stats_user in
+ * ip_vs_ctl.c.
+ */
+
spinlock_t lock; /* spin lock */
+
+ struct ip_vs_estimator est; /* estimator */
};
struct dst_entry;
@@ -440,7 +464,7 @@ struct ip_vs_app
*/
extern const char *ip_vs_proto_name(unsigned proto);
extern void ip_vs_init_hash_table(struct list_head *table, int rows);
-#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0]))
+#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
#define IP_VS_APP_TYPE_FTP 1
@@ -620,7 +644,7 @@ extern int sysctl_ip_vs_expire_quiescent_template;
extern int sysctl_ip_vs_sync_threshold[2];
extern int sysctl_ip_vs_nat_icmp_send;
extern struct ip_vs_stats ip_vs_stats;
-extern struct ctl_path net_vs_ctl_path[];
+extern const struct ctl_path net_vs_ctl_path[];
extern struct ip_vs_service *
ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport);
@@ -659,7 +683,7 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
/*
* IPVS rate estimator prototypes (from ip_vs_est.c)
*/
-extern int ip_vs_new_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
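Small illustration of the ARRAY_SIZE() form: it expands the same way as the open-coded division, and the kernel's ARRAY_SIZE() additionally rejects pointers at build time. The table below is a placeholder.

#include <linux/list.h>
#include <net/ip_vs.h>

#define EXAMPLE_TAB_SIZE	256
static struct list_head example_conn_tab[EXAMPLE_TAB_SIZE];

static void example_init_tables(void)
{
	/* Expands to ip_vs_init_hash_table(example_conn_tab, 256). */
	IP_VS_INIT_HASH_TABLE(example_conn_tab);
}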
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6affcfaa123e..853fe83d9f37 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,7 +89,10 @@ extern void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
- if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
+ struct netdev_queue *txq = q->dev_queue;
+
+ if (!netif_tx_queue_stopped(txq) &&
+ !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
__qdisc_run(q);
}
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 613173b5db69..920c4e9cb93d 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -41,6 +41,7 @@ struct atmel_lcdfb_info {
struct work_struct task;
unsigned int guard_time;
+ unsigned int smem_len;
struct platform_device *pdev;
struct clk *bus_clk;
struct clk *lcdc_clk;
diff --git a/include/video/radeon.h b/include/video/radeon.h
index 95a1f2038b1d..099ffa5e5bee 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -742,6 +742,10 @@
#define SOFT_RESET_RB (1 << 6)
#define SOFT_RESET_HDP (1 << 7)
+/* WAIT_UNTIL bit constants */
+#define WAIT_DMA_GUI_IDLE (1 << 9)
+#define WAIT_2D_IDLECLEAN (1 << 16)
+
 /* SURFACE_CNTL bit constants */
#define SURF_TRANSLATION_DIS (1 << 8)
#define NONSURF_AP0_SWP_16BPP (1 << 20)
diff --git a/init/Kconfig b/init/Kconfig
index b678803deccf..c11da38837e5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -558,17 +558,6 @@ config SYSCTL_SYSCALL
If unsure say Y here.
-config SYSCTL_SYSCALL_CHECK
- bool "Sysctl checks" if EMBEDDED
- depends on SYSCTL_SYSCALL
- default y
- ---help---
- sys_sysctl uses binary paths that have been found challenging
- to properly maintain and use. This enables checks that help
- you to keep things correct.
-
- If unsure say Y here.
-
config KALLSYMS
bool "Load all symbols for debugging/ksymoops" if EMBEDDED
default y
diff --git a/init/main.c b/init/main.c
index 0bc7e167bf45..f6f7042331dc 100644
--- a/init/main.c
+++ b/init/main.c
@@ -691,7 +691,7 @@ asmlinkage void __init start_kernel(void)
rest_init();
}
-static int __initdata initcall_debug;
+static int initcall_debug;
static int __init initcall_debug_setup(char *str)
{
@@ -700,7 +700,7 @@ static int __init initcall_debug_setup(char *str)
}
__setup("initcall_debug", initcall_debug_setup);
-static void __init do_one_initcall(initcall_t fn)
+int do_one_initcall(initcall_t fn)
{
int count = preempt_count();
ktime_t t0, t1, delta;
@@ -740,6 +740,8 @@ static void __init do_one_initcall(initcall_t fn)
print_fn_descriptor_symbol(KERN_WARNING "initcall %s", fn);
printk(" returned with %s\n", msgbuf);
}
+
+ return result;
}
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 382dd5a8b2d7..94fabd534b03 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@ config HZ
default 1000 if HZ_1000
config SCHED_HRTICK
- def_bool HIGH_RES_TIMERS && USE_GENERIC_SMP_HELPERS
+ def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS)
diff --git a/kernel/capability.c b/kernel/capability.c
index 0101e847603e..33e51e78c2d8 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -486,17 +486,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
return ret;
}
-int __capable(struct task_struct *t, int cap)
+/**
+ * capable - Determine if the current task has a superior capability in effect
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+int capable(int cap)
{
- if (security_capable(t, cap) == 0) {
- t->flags |= PF_SUPERPRIV;
+ if (has_capability(current, cap)) {
+ current->flags |= PF_SUPERPRIV;
return 1;
}
return 0;
}
-
-int capable(int cap)
-{
- return __capable(current, cap);
-}
EXPORT_SYMBOL(capable);
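Trivial usage sketch: callers are unchanged by the rework; capable() still answers for the current task and sets PF_SUPERPRIV, while has_capability() (used above) queries another task without that side effect.

#include <linux/capability.h>
#include <linux/errno.h>

static int example_privileged_op(void)
{
	/* Marks current with PF_SUPERPRIV if the capability is usable. */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* ... perform the privileged work ... */
	return 0;
}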
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e202a68d1cc1..f17e9854c246 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -349,6 +349,8 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
goto out_notify;
BUG_ON(!cpu_online(cpu));
+ cpu_set(cpu, cpu_active_map);
+
/* Now call notifier in preparation. */
raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
@@ -367,7 +369,7 @@ int __cpuinit cpu_up(unsigned int cpu)
if (!cpu_isset(cpu, cpu_possible_map)) {
printk(KERN_ERR "can't online cpu %d because it is not "
"configured as may-hotadd at boot time\n", cpu);
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
printk(KERN_ERR "please check additional_cpus= boot "
"parameter\n");
#endif
@@ -383,9 +385,6 @@ int __cpuinit cpu_up(unsigned int cpu)
err = _cpu_up(cpu, 0);
- if (cpu_online(cpu))
- cpu_set(cpu, cpu_active_map);
-
out:
cpu_maps_update_done();
return err;
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6c6d35d68ee9..a09dd29c2fd7 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include "internals.h"
@@ -16,23 +17,18 @@ static struct proc_dir_entry *root_irq_dir;
#ifdef CONFIG_SMP
-static int irq_affinity_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
- struct irq_desc *desc = irq_desc + (long)data;
+ struct irq_desc *desc = irq_desc + (long)m->private;
cpumask_t *mask = &desc->affinity;
- int len;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PENDING)
mask = &desc->pending_mask;
#endif
- len = cpumask_scnprintf(page, count, *mask);
-
- if (count - len < 2)
- return -EINVAL;
- len += sprintf(page + len, "\n");
- return len;
+ seq_cpumask(m, mask);
+ seq_putc(m, '\n');
+ return 0;
}
#ifndef is_affinity_mask_valid
@@ -40,11 +36,12 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off,
#endif
int no_irq_affinity;
-static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t irq_affinity_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
- unsigned int irq = (int)(long)data, full_count = count, err;
+ unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
cpumask_t new_value;
+ int err;
if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
irq_balancing_disabled(irq))
@@ -65,28 +62,38 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
if (!cpus_intersects(new_value, cpu_online_map))
/* Special case for empty set - allow the architecture
code to set default SMP affinity. */
- return irq_select_affinity(irq) ? -EINVAL : full_count;
+ return irq_select_affinity(irq) ? -EINVAL : count;
irq_set_affinity(irq, new_value);
- return full_count;
+ return count;
}
-static int default_affinity_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
- int len = cpumask_scnprintf(page, count, irq_default_affinity);
- if (count - len < 2)
- return -EINVAL;
- len += sprintf(page + len, "\n");
- return len;
+ return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
}
-static int default_affinity_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static const struct file_operations irq_affinity_proc_fops = {
+ .open = irq_affinity_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = irq_affinity_proc_write,
+};
+
+static int default_affinity_show(struct seq_file *m, void *v)
+{
+ seq_cpumask(m, &irq_default_affinity);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static ssize_t default_affinity_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
{
- unsigned int full_count = count, err;
cpumask_t new_value;
+ int err;
err = cpumask_parse_user(buffer, count, new_value);
if (err)
@@ -105,8 +112,21 @@ static int default_affinity_write(struct file *file, const char __user *buffer,
irq_default_affinity = new_value;
- return full_count;
+ return count;
}
+
+static int default_affinity_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, default_affinity_show, NULL);
+}
+
+static const struct file_operations default_affinity_proc_fops = {
+ .open = default_affinity_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = default_affinity_write,
+};
#endif
static int irq_spurious_read(char *page, char **start, off_t off,
@@ -178,16 +198,9 @@ void register_irq_proc(unsigned int irq)
irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
#ifdef CONFIG_SMP
- {
- /* create /proc/irq/<irq>/smp_affinity */
- entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
-
- if (entry) {
- entry->data = (void *)(long)irq;
- entry->read_proc = irq_affinity_read_proc;
- entry->write_proc = irq_affinity_write_proc;
- }
- }
+ /* create /proc/irq/<irq>/smp_affinity */
+ proc_create_data("smp_affinity", 0600, irq_desc[irq].dir,
+ &irq_affinity_proc_fops, (void *)(long)irq);
#endif
entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
@@ -208,15 +221,8 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
- struct proc_dir_entry *entry;
-
- /* create /proc/irq/default_smp_affinity */
- entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
- if (entry) {
- entry->data = NULL;
- entry->read_proc = default_affinity_read;
- entry->write_proc = default_affinity_write;
- }
+ proc_create("irq/default_smp_affinity", 0600, NULL,
+ &default_affinity_proc_fops);
#endif
}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c8a4370e2a34..59f3f0df35d4 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
@@ -77,7 +77,7 @@ int kexec_should_crash(struct task_struct *p)
*
* The code for the transition from the current kernel to the
* the new kernel is placed in the control_code_buffer, whose size
- * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single
+ * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
* page of memory is necessary, but some architectures require more.
* Because this memory must be identity mapped in the transition from
* virtual to physical addresses it must live in the range
@@ -242,7 +242,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
*/
result = -ENOMEM;
image->control_code_page = kimage_alloc_control_pages(image,
- get_order(KEXEC_CONTROL_CODE_SIZE));
+ get_order(KEXEC_CONTROL_PAGE_SIZE));
if (!image->control_code_page) {
printk(KERN_ERR "Could not allocate control_code_buffer\n");
goto out;
@@ -317,7 +317,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
*/
result = -ENOMEM;
image->control_code_page = kimage_alloc_control_pages(image,
- get_order(KEXEC_CONTROL_CODE_SIZE));
+ get_order(KEXEC_CONTROL_PAGE_SIZE));
if (!image->control_code_page) {
printk(KERN_ERR "Could not allocate control_code_buffer\n");
goto out;
@@ -924,19 +924,14 @@ static int kimage_load_segment(struct kimage *image,
*/
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
-/*
- * A home grown binary mutex.
- * Nothing can wait so this mutex is safe to use
- * in interrupt context :)
- */
-static int kexec_lock;
+
+static DEFINE_MUTEX(kexec_mutex);
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
struct kexec_segment __user *segments,
unsigned long flags)
{
struct kimage **dest_image, *image;
- int locked;
int result;
/* We only trust the superuser with rebooting the system. */
@@ -972,8 +967,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
*
* KISS: always take the mutex.
*/
- locked = xchg(&kexec_lock, 1);
- if (locked)
+ if (!mutex_trylock(&kexec_mutex))
return -EBUSY;
dest_image = &kexec_image;
@@ -1015,8 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
image = xchg(dest_image, image);
out:
- locked = xchg(&kexec_lock, 0); /* Release the mutex */
- BUG_ON(!locked);
+ mutex_unlock(&kexec_mutex);
kimage_free(image);
return result;
@@ -1063,10 +1056,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
void crash_kexec(struct pt_regs *regs)
{
- int locked;
-
-
- /* Take the kexec_lock here to prevent sys_kexec_load
+ /* Take the kexec_mutex here to prevent sys_kexec_load
* running on one cpu from replacing the crash kernel
* we are using after a panic on a different cpu.
*
@@ -1074,8 +1064,7 @@ void crash_kexec(struct pt_regs *regs)
* of memory the xchg(&kexec_crash_image) would be
* sufficient. But since I reuse the memory...
*/
- locked = xchg(&kexec_lock, 1);
- if (!locked) {
+ if (mutex_trylock(&kexec_mutex)) {
if (kexec_crash_image) {
struct pt_regs fixed_regs;
crash_setup_regs(&fixed_regs, regs);
@@ -1083,8 +1072,7 @@ void crash_kexec(struct pt_regs *regs)
machine_crash_shutdown(&fixed_regs);
machine_kexec(kexec_crash_image);
}
- locked = xchg(&kexec_lock, 0);
- BUG_ON(!locked);
+ mutex_unlock(&kexec_mutex);
}
}
@@ -1426,25 +1414,23 @@ static int __init crash_save_vmcoreinfo_init(void)
module_init(crash_save_vmcoreinfo_init)
-/**
- * kernel_kexec - reboot the system
- *
- * Move into place and start executing a preloaded standalone
- * executable. If nothing was preloaded return an error.
+/*
+ * Move into place and start executing a preloaded standalone
+ * executable. If nothing was preloaded return an error.
*/
int kernel_kexec(void)
{
int error = 0;
- if (xchg(&kexec_lock, 1))
+ if (!mutex_trylock(&kexec_mutex))
return -EBUSY;
if (!kexec_image) {
error = -EINVAL;
goto Unlock;
}
- if (kexec_image->preserve_context) {
#ifdef CONFIG_KEXEC_JUMP
+ if (kexec_image->preserve_context) {
mutex_lock(&pm_mutex);
pm_prepare_console();
error = freeze_processes();
@@ -1459,6 +1445,7 @@ int kernel_kexec(void)
error = disable_nonboot_cpus();
if (error)
goto Resume_devices;
+ device_pm_lock();
local_irq_disable();
/* At this point, device_suspend() has been called,
* but *not* device_power_down(). We *must*
@@ -1470,26 +1457,22 @@ int kernel_kexec(void)
error = device_power_down(PMSG_FREEZE);
if (error)
goto Enable_irqs;
- save_processor_state();
+ } else
#endif
- } else {
- blocking_notifier_call_chain(&reboot_notifier_list,
- SYS_RESTART, NULL);
- system_state = SYSTEM_RESTART;
- device_shutdown();
- sysdev_shutdown();
+ {
+ kernel_restart_prepare(NULL);
printk(KERN_EMERG "Starting new kernel\n");
machine_shutdown();
}
machine_kexec(kexec_image);
- if (kexec_image->preserve_context) {
#ifdef CONFIG_KEXEC_JUMP
- restore_processor_state();
+ if (kexec_image->preserve_context) {
device_power_up(PMSG_RESTORE);
Enable_irqs:
local_irq_enable();
+ device_pm_unlock();
enable_nonboot_cpus();
Resume_devices:
device_resume(PMSG_RESTORE);
@@ -1499,11 +1482,10 @@ int kernel_kexec(void)
Restore_console:
pm_restore_console();
mutex_unlock(&pm_mutex);
-#endif
}
+#endif
Unlock:
- xchg(&kexec_lock, 0);
-
+ mutex_unlock(&kexec_mutex);
return error;
}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d38a64362973..3bfb1877a003 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+ if (!hlock->class_idx) {
+ DEBUG_LOCKS_WARN_ON(1);
+ return NULL;
+ }
+ return lock_classes + hlock->class_idx - 1;
+}
+
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
holdtime = sched_clock() - hlock->holdtime_stamp;
- stats = get_lock_stats(hlock->class);
+ stats = get_lock_stats(hlock_class(hlock));
if (hlock->read)
lock_time_inc(&stats->read_holdtime, holdtime);
else
@@ -372,6 +381,19 @@ unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
unsigned int max_recursion_depth;
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+ unsigned int depth)
+{
+ if (!depth)
+ lockdep_dependency_gen_id++;
+ if (source->dep_gen_id == lockdep_dependency_gen_id)
+ return true;
+ source->dep_gen_id = lockdep_dependency_gen_id;
+ return false;
+}
+
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* We cannot printk in early bootup code. Not even early_printk()
@@ -505,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
static void print_lock(struct held_lock *hlock)
{
- print_lock_name(hlock->class);
+ print_lock_name(hlock_class(hlock));
printk(", at: ");
print_ip_sym(hlock->acquire_ip);
}
@@ -558,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
{
struct lock_list *entry;
+ if (lockdep_dependency_visit(class, depth))
+ return;
+
if (DEBUG_LOCKS_WARN_ON(depth >= 20))
return;
@@ -932,7 +957,7 @@ static noinline int print_circular_bug_tail(void)
if (debug_locks_silent)
return 0;
- this.class = check_source->class;
+ this.class = hlock_class(check_source);
if (!save_trace(&this.trace))
return 0;
@@ -959,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void)
return 0;
}
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+ unsigned int depth)
+{
+ struct lock_list *entry;
+ unsigned long ret = 1;
+
+ if (lockdep_dependency_visit(class, depth))
+ return 0;
+
+ /*
+ * Recurse this class's dependency list:
+ */
+ list_for_each_entry(entry, &class->locks_after, entry)
+ ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+ return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+ unsigned long ret, flags;
+
+ local_irq_save(flags);
+ __raw_spin_lock(&lockdep_lock);
+ ret = __lockdep_count_forward_deps(class, 0);
+ __raw_spin_unlock(&lockdep_lock);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+ unsigned int depth)
+{
+ struct lock_list *entry;
+ unsigned long ret = 1;
+
+ if (lockdep_dependency_visit(class, depth))
+ return 0;
+ /*
+ * Recurse this class's dependency list:
+ */
+ list_for_each_entry(entry, &class->locks_before, entry)
+ ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+ return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+ unsigned long ret, flags;
+
+ local_irq_save(flags);
+ __raw_spin_lock(&lockdep_lock);
+ ret = __lockdep_count_backward_deps(class, 0);
+ __raw_spin_unlock(&lockdep_lock);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
/*
* Prove that the dependency graph starting at <entry> can not
* lead to <target>. Print an error and return 0 if it does.
@@ -968,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
{
struct lock_list *entry;
+ if (lockdep_dependency_visit(source, depth))
+ return 1;
+
debug_atomic_inc(&nr_cyclic_check_recursions);
if (depth > max_recursion_depth)
max_recursion_depth = depth;
@@ -977,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
* Check this lock's dependency list:
*/
list_for_each_entry(entry, &source->locks_after, entry) {
- if (entry->class == check_target->class)
+ if (entry->class == hlock_class(check_target))
return print_circular_bug_header(entry, depth+1);
debug_atomic_inc(&nr_cyclic_checks);
if (!check_noncircular(entry->class, depth+1))
@@ -1011,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
struct lock_list *entry;
int ret;
+ if (lockdep_dependency_visit(source, depth))
+ return 1;
+
if (depth > max_recursion_depth)
max_recursion_depth = depth;
if (depth >= RECURSION_LIMIT)
@@ -1050,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
struct lock_list *entry;
int ret;
+ if (lockdep_dependency_visit(source, depth))
+ return 1;
+
if (!__raw_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
@@ -1064,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
return 2;
}
+ if (!source && debug_locks_off_graph_unlock()) {
+ WARN_ON(1);
+ return 0;
+ }
+
/*
* Check this lock's dependency list:
*/
@@ -1103,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr,
printk("\nand this task is already holding:\n");
print_lock(prev);
printk("which would create a new lock dependency:\n");
- print_lock_name(prev->class);
+ print_lock_name(hlock_class(prev));
printk(" ->");
- print_lock_name(next->class);
+ print_lock_name(hlock_class(next));
printk("\n");
printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1146,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
find_usage_bit = bit_backwards;
/* fills in <backwards_match> */
- ret = find_usage_backwards(prev->class, 0);
+ ret = find_usage_backwards(hlock_class(prev), 0);
if (!ret || ret == 1)
return ret;
find_usage_bit = bit_forwards;
- ret = find_usage_forwards(next->class, 0);
+ ret = find_usage_forwards(hlock_class(next), 0);
if (!ret || ret == 1)
return ret;
/* ret == 2 */
@@ -1272,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
struct lockdep_map *next_instance, int read)
{
struct held_lock *prev;
+ struct held_lock *nest = NULL;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
prev = curr->held_locks + i;
- if (prev->class != next->class)
+
+ if (prev->instance == next->nest_lock)
+ nest = prev;
+
+ if (hlock_class(prev) != hlock_class(next))
continue;
+
/*
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
if ((read == 2) && prev->read)
return 2;
+
+ /*
+ * We're holding the nest_lock, which serializes this lock's
+ * nesting behaviour.
+ */
+ if (nest)
+ return 2;
+
return print_deadlock_bug(curr, prev, next);
}
return 1;
@@ -1329,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
*/
check_source = next;
check_target = prev;
- if (!(check_noncircular(next->class, 0)))
+ if (!(check_noncircular(hlock_class(next), 0)))
return print_circular_bug_tail();
if (!check_prev_add_irq(curr, prev, next))
@@ -1353,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* chains - the second one will be new, but L1 already has
* L2 added to its dependency list, due to the first chain.)
*/
- list_for_each_entry(entry, &prev->class->locks_after, entry) {
- if (entry->class == next->class) {
+ list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+ if (entry->class == hlock_class(next)) {
if (distance == 1)
entry->distance = 1;
return 2;
@@ -1365,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
- ret = add_lock_to_list(prev->class, next->class,
- &prev->class->locks_after, next->acquire_ip, distance);
+ ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+ &hlock_class(prev)->locks_after,
+ next->acquire_ip, distance);
if (!ret)
return 0;
- ret = add_lock_to_list(next->class, prev->class,
- &next->class->locks_before, next->acquire_ip, distance);
+ ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+ &hlock_class(next)->locks_before,
+ next->acquire_ip, distance);
if (!ret)
return 0;
/*
* Debugging printouts:
*/
- if (verbose(prev->class) || verbose(next->class)) {
+ if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
graph_unlock();
printk("\n new dependency: ");
- print_lock_name(prev->class);
+ print_lock_name(hlock_class(prev));
printk(" => ");
- print_lock_name(next->class);
+ print_lock_name(hlock_class(next));
printk("\n");
dump_stack();
return graph_lock();
@@ -1481,7 +1597,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
struct held_lock *hlock,
u64 chain_key)
{
- struct lock_class *class = hlock->class;
+ struct lock_class *class = hlock_class(hlock);
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
struct held_lock *hlock_curr, *hlock_next;
@@ -1554,7 +1670,7 @@ cache_hit:
if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
chain->base = cn;
for (j = 0; j < chain->depth - 1; j++, i++) {
- int lock_id = curr->held_locks[i].class - lock_classes;
+ int lock_id = curr->held_locks[i].class_idx - 1;
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = class - lock_classes;
@@ -1643,14 +1759,13 @@ static void check_chain_key(struct task_struct *curr)
hlock = curr->held_locks + i;
if (chain_key != hlock->prev_chain_key) {
debug_locks_off();
- printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
+ WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)hlock->prev_chain_key);
- WARN_ON(1);
return;
}
- id = hlock->class - lock_classes;
+ id = hlock->class_idx - 1;
if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
return;
@@ -1662,11 +1777,10 @@ static void check_chain_key(struct task_struct *curr)
}
if (chain_key != curr->curr_chain_key) {
debug_locks_off();
- printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
+ WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)curr->curr_chain_key);
- WARN_ON(1);
}
#endif
}
@@ -1695,7 +1809,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
print_lock(this);
printk("{%s} state was registered at:\n", usage_str[prev_bit]);
- print_stack_trace(this->class->usage_traces + prev_bit, 1);
+ print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
print_irqtrace_events(curr);
printk("\nother info that might help us debug this:\n");
@@ -1714,7 +1828,7 @@ static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
- if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+ if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
return print_usage_bug(curr, this, bad_bit, new_bit);
return 1;
}
@@ -1753,7 +1867,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
lockdep_print_held_locks(curr);
printk("\nthe first lock's dependencies:\n");
- print_lock_dependencies(this->class, 0);
+ print_lock_dependencies(hlock_class(this), 0);
printk("\nthe second lock's dependencies:\n");
print_lock_dependencies(other, 0);
@@ -1776,7 +1890,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
find_usage_bit = bit;
/* fills in <forwards_match> */
- ret = find_usage_forwards(this->class, 0);
+ ret = find_usage_forwards(hlock_class(this), 0);
if (!ret || ret == 1)
return ret;
@@ -1795,7 +1909,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
find_usage_bit = bit;
/* fills in <backwards_match> */
- ret = find_usage_backwards(this->class, 0);
+ ret = find_usage_backwards(hlock_class(this), 0);
if (!ret || ret == 1)
return ret;
@@ -1861,7 +1975,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
return 0;
#endif
- if (hardirq_verbose(this->class))
+ if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_USED_IN_SOFTIRQ:
@@ -1886,7 +2000,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
return 0;
#endif
- if (softirq_verbose(this->class))
+ if (softirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_USED_IN_HARDIRQ_READ:
@@ -1899,7 +2013,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_HARDIRQS, "hard"))
return 0;
- if (hardirq_verbose(this->class))
+ if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_USED_IN_SOFTIRQ_READ:
@@ -1912,7 +2026,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_SOFTIRQS, "soft"))
return 0;
- if (softirq_verbose(this->class))
+ if (softirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_ENABLED_HARDIRQS:
@@ -1938,7 +2052,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
return 0;
#endif
- if (hardirq_verbose(this->class))
+ if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_ENABLED_SOFTIRQS:
@@ -1964,7 +2078,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
return 0;
#endif
- if (softirq_verbose(this->class))
+ if (softirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_ENABLED_HARDIRQS_READ:
@@ -1979,7 +2093,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_USED_IN_HARDIRQ, "hard"))
return 0;
#endif
- if (hardirq_verbose(this->class))
+ if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
case LOCK_ENABLED_SOFTIRQS_READ:
@@ -1994,7 +2108,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
LOCK_USED_IN_SOFTIRQ, "soft"))
return 0;
#endif
- if (softirq_verbose(this->class))
+ if (softirq_verbose(hlock_class(this)))
ret = 2;
break;
default:
@@ -2310,7 +2424,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
* If already set then do not dirty the cacheline,
* nor do any checks:
*/
- if (likely(this->class->usage_mask & new_mask))
+ if (likely(hlock_class(this)->usage_mask & new_mask))
return 1;
if (!graph_lock())
@@ -2318,14 +2432,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
/*
 	 * Make sure we didn't race:
*/
- if (unlikely(this->class->usage_mask & new_mask)) {
+ if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
graph_unlock();
return 1;
}
- this->class->usage_mask |= new_mask;
+ hlock_class(this)->usage_mask |= new_mask;
- if (!save_trace(this->class->usage_traces + new_bit))
+ if (!save_trace(hlock_class(this)->usage_traces + new_bit))
return 0;
switch (new_bit) {
@@ -2405,7 +2519,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
- unsigned long ip)
+ struct lockdep_map *nest_lock, unsigned long ip)
{
struct task_struct *curr = current;
struct lock_class *class = NULL;
@@ -2459,14 +2573,16 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
return 0;
hlock = curr->held_locks + depth;
-
- hlock->class = class;
+ if (DEBUG_LOCKS_WARN_ON(!class))
+ return 0;
+ hlock->class_idx = class - lock_classes + 1;
hlock->acquire_ip = ip;
hlock->instance = lock;
+ hlock->nest_lock = nest_lock;
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
- hlock->hardirqs_off = hardirqs_off;
+ hlock->hardirqs_off = !!hardirqs_off;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
hlock->holdtime_stamp = sched_clock();
@@ -2574,6 +2690,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
return 1;
}
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+ unsigned int subclass, unsigned long ip)
+{
+ struct task_struct *curr = current;
+ struct held_lock *hlock, *prev_hlock;
+ struct lock_class *class;
+ unsigned int depth;
+ int i;
+
+ depth = curr->lockdep_depth;
+ if (DEBUG_LOCKS_WARN_ON(!depth))
+ return 0;
+
+ prev_hlock = NULL;
+ for (i = depth-1; i >= 0; i--) {
+ hlock = curr->held_locks + i;
+ /*
+ * We must not cross into another context:
+ */
+ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+ break;
+ if (hlock->instance == lock)
+ goto found_it;
+ prev_hlock = hlock;
+ }
+ return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+ class = register_lock_class(lock, subclass, 0);
+ hlock->class_idx = class - lock_classes + 1;
+
+ curr->lockdep_depth = i;
+ curr->curr_chain_key = hlock->prev_chain_key;
+
+ for (; i < depth; i++) {
+ hlock = curr->held_locks + i;
+ if (!__lock_acquire(hlock->instance,
+ hlock_class(hlock)->subclass, hlock->trylock,
+ hlock->read, hlock->check, hlock->hardirqs_off,
+ hlock->nest_lock, hlock->acquire_ip))
+ return 0;
+ }
+
+ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+ return 0;
+ return 1;
+}
+
/*
  * Remove the lock from the list of currently held locks in a
* potentially non-nested (out of order) manner. This is a
@@ -2624,9 +2789,9 @@ found_it:
for (i++; i < depth; i++) {
hlock = curr->held_locks + i;
if (!__lock_acquire(hlock->instance,
- hlock->class->subclass, hlock->trylock,
+ hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip))
return 0;
}
@@ -2669,7 +2834,7 @@ static int lock_release_nested(struct task_struct *curr,
#ifdef CONFIG_DEBUG_LOCKDEP
hlock->prev_chain_key = 0;
- hlock->class = NULL;
+ hlock->class_idx = 0;
hlock->acquire_ip = 0;
hlock->irq_context = 0;
#endif
@@ -2738,18 +2903,36 @@ static void check_flags(unsigned long flags)
#endif
}
+void
+lock_set_subclass(struct lockdep_map *lock,
+ unsigned int subclass, unsigned long ip)
+{
+ unsigned long flags;
+
+ if (unlikely(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
+ check_flags(flags);
+ if (__lock_set_subclass(lock, subclass, ip))
+ check_chain_key(current);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
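lock_set_subclass() re-annotates the lockdep subclass of a lock the caller already holds; the scheduler uses it below in double_unlock_balance() to return this_rq->lock to subclass 0 once the nested busiest->lock is dropped. A minimal sketch of that pattern, with hypothetical outer/inner locks:

	static void release_inner_and_reannotate(spinlock_t *outer, spinlock_t *inner)
	{
		/* inner was taken with spin_lock_nested(inner, SINGLE_DEPTH_NESTING) */
		spin_unlock(inner);
		/* outer is still held; tell lockdep it is back at subclass 0 */
		lock_set_subclass(&outer->dep_map, 0, _RET_IP_);
	}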
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
*/
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check, unsigned long ip)
+ int trylock, int read, int check,
+ struct lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
- if (unlikely(!lock_stat && !prove_locking))
- return;
-
if (unlikely(current->lockdep_recursion))
return;
@@ -2758,7 +2941,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 1;
__lock_acquire(lock, subclass, trylock, read, check,
- irqs_disabled_flags(flags), ip);
+ irqs_disabled_flags(flags), nest_lock, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
@@ -2770,9 +2953,6 @@ void lock_release(struct lockdep_map *lock, int nested,
{
unsigned long flags;
- if (unlikely(!lock_stat && !prove_locking))
- return;
-
if (unlikely(current->lockdep_recursion))
return;
@@ -2845,9 +3025,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
found_it:
hlock->waittime_stamp = sched_clock();
- point = lock_contention_point(hlock->class, ip);
+ point = lock_contention_point(hlock_class(hlock), ip);
- stats = get_lock_stats(hlock->class);
+ stats = get_lock_stats(hlock_class(hlock));
if (point < ARRAY_SIZE(stats->contention_point))
stats->contention_point[i]++;
if (lock->cpu != smp_processor_id())
@@ -2893,7 +3073,7 @@ found_it:
hlock->holdtime_stamp = now;
}
- stats = get_lock_stats(hlock->class);
+ stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
if (hlock->read)
lock_time_inc(&stats->read_waittime, waittime);
@@ -2988,6 +3168,7 @@ static void zap_class(struct lock_class *class)
list_del_rcu(&class->hash_entry);
list_del_rcu(&class->lock_entry);
+ class->key = NULL;
}
static inline int within(const void *addr, void *start, unsigned long size)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c3600a091a28..56b196932c08 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -17,9 +17,6 @@
*/
#define MAX_LOCKDEP_ENTRIES 8192UL
-#define MAX_LOCKDEP_KEYS_BITS 11
-#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
-
#define MAX_LOCKDEP_CHAINS_BITS 14
#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
@@ -53,6 +50,22 @@ extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;
+#ifdef CONFIG_PROVE_LOCKING
+extern unsigned long lockdep_count_forward_deps(struct lock_class *);
+extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+#else
+static inline unsigned long
+lockdep_count_forward_deps(struct lock_class *class)
+{
+ return 0;
+}
+static inline unsigned long
+lockdep_count_backward_deps(struct lock_class *class)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* Various lockdep statistics:
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 9b0e940e2545..4b194d34d77f 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v)
{
}
-static unsigned long count_forward_deps(struct lock_class *class)
-{
- struct lock_list *entry;
- unsigned long ret = 1;
-
- /*
- * Recurse this class's dependency list:
- */
- list_for_each_entry(entry, &class->locks_after, entry)
- ret += count_forward_deps(entry->class);
-
- return ret;
-}
-
-static unsigned long count_backward_deps(struct lock_class *class)
-{
- struct lock_list *entry;
- unsigned long ret = 1;
-
- /*
- * Recurse this class's dependency list:
- */
- list_for_each_entry(entry, &class->locks_before, entry)
- ret += count_backward_deps(entry->class);
-
- return ret;
-}
-
static void print_name(struct seq_file *m, struct lock_class *class)
{
char str[128];
@@ -110,7 +82,6 @@ static void print_name(struct seq_file *m, struct lock_class *class)
static int l_show(struct seq_file *m, void *v)
{
- unsigned long nr_forward_deps, nr_backward_deps;
struct lock_class *class = v;
struct lock_list *entry;
char c1, c2, c3, c4;
@@ -124,11 +95,10 @@ static int l_show(struct seq_file *m, void *v)
#ifdef CONFIG_DEBUG_LOCKDEP
seq_printf(m, " OPS:%8ld", class->ops);
#endif
- nr_forward_deps = count_forward_deps(class);
- seq_printf(m, " FD:%5ld", nr_forward_deps);
-
- nr_backward_deps = count_backward_deps(class);
- seq_printf(m, " BD:%5ld", nr_backward_deps);
+#ifdef CONFIG_PROVE_LOCKING
+ seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
+ seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
+#endif
get_usage_chars(class, &c1, &c2, &c3, &c4);
seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
@@ -229,6 +199,9 @@ static int lc_show(struct seq_file *m, void *v)
for (i = 0; i < chain->depth; i++) {
class = lock_chain_get_class(chain, i);
+ if (!class->key)
+ continue;
+
seq_printf(m, "[%p] ", class->key);
print_name(m, class);
seq_puts(m, "\n");
@@ -350,7 +323,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
nr_hardirq_read_unsafe++;
- sum_forward_deps += count_forward_deps(class);
+#ifdef CONFIG_PROVE_LOCKING
+ sum_forward_deps += lockdep_count_forward_deps(class);
+#endif
}
#ifdef CONFIG_DEBUG_LOCKDEP
DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
diff --git a/kernel/module.c b/kernel/module.c
index 61d212120df4..08864d257eb0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2288,7 +2288,7 @@ sys_init_module(void __user *umod,
/* Start the module */
if (mod->init != NULL)
- ret = mod->init();
+ ret = do_one_initcall(mod->init);
if (ret < 0) {
/* Init routine failed: abort. Try to protect us from
buggy refcounters. */
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9a21681aa80f..e36d5798cbff 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -289,21 +289,29 @@ void do_schedule_next_timer(struct siginfo *info)
else
schedule_next_timer(timr);
- info->si_overrun = timr->it_overrun_last;
+ info->si_overrun += timr->it_overrun_last;
}
if (timr)
unlock_timer(timr, flags);
}
-int posix_timer_event(struct k_itimer *timr,int si_private)
+int posix_timer_event(struct k_itimer *timr, int si_private)
{
- memset(&timr->sigq->info, 0, sizeof(siginfo_t));
+ /*
+ * FIXME: if ->sigq is queued we can race with
+ * dequeue_signal()->do_schedule_next_timer().
+ *
+ * If dequeue_signal() sees the "right" value of
+ * si_sys_private it calls do_schedule_next_timer().
+ * We re-queue ->sigq and drop ->it_lock().
+ * do_schedule_next_timer() locks the timer
+ * and re-schedules it while ->sigq is pending.
+	 * Not really bad, but not what we want.
+ */
timr->sigq->info.si_sys_private = si_private;
- /* Send signal to the process that owns this timer.*/
timr->sigq->info.si_signo = timr->it_sigev_signo;
- timr->sigq->info.si_errno = 0;
timr->sigq->info.si_code = SI_TIMER;
timr->sigq->info.si_tid = timr->it_id;
timr->sigq->info.si_value = timr->it_sigev_value;
@@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void)
kmem_cache_free(posix_timers_cache, tmr);
tmr = NULL;
}
+ memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
return tmr;
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 082b3fcb32a0..356699a96d56 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
if (!dumpable && !capable(CAP_SYS_PTRACE))
return -EPERM;
- return security_ptrace(current, task, mode);
+ return security_ptrace_may_access(task, mode);
}
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
@@ -499,8 +499,7 @@ repeat:
goto repeat;
}
- ret = security_ptrace(current->parent, current,
- PTRACE_MODE_ATTACH);
+ ret = security_ptrace_traceme(current->parent);
/*
* Set the ptrace bit in the process ptrace flags.
diff --git a/kernel/sched.c b/kernel/sched.c
index 04160d277e7a..9a1ddb84e26d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -600,7 +600,6 @@ struct rq {
/* BKL stats */
unsigned int bkl_count;
#endif
- struct lock_class_key rq_lock_key;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -809,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
/*
* ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
*/
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
/*
* period over which we measure -rt task cpu usage in us.
@@ -834,7 +833,7 @@ static inline u64 global_rt_period(void)
static inline u64 global_rt_runtime(void)
{
- if (sysctl_sched_rt_period < 0)
+ if (sysctl_sched_rt_runtime < 0)
return RUNTIME_INF;
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
} else {
if (rq1 < rq2) {
spin_lock(&rq1->lock);
- spin_lock(&rq2->lock);
+ spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
spin_lock(&rq2->lock);
- spin_lock(&rq1->lock);
+ spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
- spin_lock(&this_rq->lock);
+ spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
ret = 1;
} else
- spin_lock(&busiest->lock);
+ spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
}
return ret;
}
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+ __releases(busiest->lock)
+{
+ spin_unlock(&busiest->lock);
+ lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
/*
* If dest_cpu is allowed for this process, migrate the task to it.
* This is accomplished by forcing the cpu_allowed mask to only
@@ -3637,7 +3643,7 @@ redo:
ld_moved = move_tasks(this_rq, this_cpu, busiest,
imbalance, sd, CPU_NEWLY_IDLE,
&all_pinned);
- spin_unlock(&busiest->lock);
+ double_unlock_balance(this_rq, busiest);
if (unlikely(all_pinned)) {
cpu_clear(cpu_of(busiest), *cpus);
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
else
schedstat_inc(sd, alb_failed);
}
- spin_unlock(&target_rq->lock);
+ double_unlock_balance(busiest_rq, target_rq);
}
#ifdef CONFIG_NO_HZ
@@ -4663,6 +4669,52 @@ int __sched wait_for_completion_killable(struct completion *x)
}
EXPORT_SYMBOL(wait_for_completion_killable);
+/**
+ * try_wait_for_completion - try to decrement a completion without blocking
+ * @x: completion structure
+ *
+ * Returns: 0 if a decrement cannot be done without blocking
+ * 1 if a decrement succeeded.
+ *
+ * If a completion is being used as a counting completion,
+ * attempt to decrement the counter without blocking. This
+ * enables us to avoid waiting if the resource the completion
+ * is protecting is not available.
+ */
+bool try_wait_for_completion(struct completion *x)
+{
+ int ret = 1;
+
+ spin_lock_irq(&x->wait.lock);
+ if (!x->done)
+ ret = 0;
+ else
+ x->done--;
+ spin_unlock_irq(&x->wait.lock);
+ return ret;
+}
+EXPORT_SYMBOL(try_wait_for_completion);
+
+/**
+ * completion_done - Test to see if a completion has any waiters
+ * @x: completion structure
+ *
+ * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * 1 if there are no waiters.
+ *
+ */
+bool completion_done(struct completion *x)
+{
+ int ret = 1;
+
+ spin_lock_irq(&x->wait.lock);
+ if (!x->done)
+ ret = 0;
+ spin_unlock_irq(&x->wait.lock);
+ return ret;
+}
+EXPORT_SYMBOL(completion_done);
+
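The kernel-doc above describes the two new non-blocking completion helpers. A hedged usage sketch, assuming a completion used as a counting semaphore over a hypothetical pool of slots:

	static void get_slot(struct completion *slots)
	{
		if (!try_wait_for_completion(slots))	/* no slot free, would block */
			wait_for_completion(slots);	/* fall back to sleeping */
		/* ... use the slot; the owner later returns it with complete(slots) ... */
	}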
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
@@ -5734,6 +5786,8 @@ static inline void sched_init_granularity(void)
sysctl_sched_latency = limit;
sysctl_sched_wakeup_granularity *= factor;
+
+ sysctl_sched_shares_ratelimit *= factor;
}
#ifdef CONFIG_SMP
@@ -8000,7 +8054,6 @@ void __init sched_init(void)
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
- lockdep_set_class(&rq->lock, &rq->rq_lock_key);
rq->nr_running = 0;
init_cfs_rq(&rq->cfs, rq);
init_rt_rq(&rq->rt, rq);
@@ -8457,8 +8510,8 @@ struct task_group *sched_create_group(struct task_group *parent)
WARN_ON(!parent); /* root should already exist */
tg->parent = parent;
- list_add_rcu(&tg->siblings, &parent->children);
INIT_LIST_HEAD(&tg->children);
+ list_add_rcu(&tg->siblings, &parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
return tg;
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 22ed55d1167f..204991a0bfa7 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -32,13 +32,19 @@
#include <linux/ktime.h>
#include <linux/module.h>
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is the default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
+ return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+}
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static __read_mostly int sched_clock_running;
-#define MULTI_SHIFT 15
-/* Max is double, Min is 1/2 */
-#define MAX_MULTI (2LL << MULTI_SHIFT)
-#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
struct sched_clock_data {
/*
@@ -49,14 +55,9 @@ struct sched_clock_data {
raw_spinlock_t lock;
unsigned long tick_jiffies;
- u64 prev_raw;
u64 tick_raw;
u64 tick_gtod;
u64 clock;
- s64 multi;
-#ifdef CONFIG_NO_HZ
- int check_max;
-#endif
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
@@ -71,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
return &per_cpu(sched_clock_data, cpu);
}
-static __read_mostly int sched_clock_running;
-
void sched_clock_init(void)
{
u64 ktime_now = ktime_to_ns(ktime_get());
@@ -84,90 +83,39 @@ void sched_clock_init(void)
scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
scd->tick_jiffies = now_jiffies;
- scd->prev_raw = 0;
scd->tick_raw = 0;
scd->tick_gtod = ktime_now;
scd->clock = ktime_now;
- scd->multi = 1 << MULTI_SHIFT;
-#ifdef CONFIG_NO_HZ
- scd->check_max = 1;
-#endif
}
sched_clock_running = 1;
}
-#ifdef CONFIG_NO_HZ
-/*
- * The dynamic ticks makes the delta jiffies inaccurate. This
- * prevents us from checking the maximum time update.
- * Disable the maximum check during stopped ticks.
- */
-void sched_clock_tick_stop(int cpu)
-{
- struct sched_clock_data *scd = cpu_sdc(cpu);
-
- scd->check_max = 0;
-}
-
-void sched_clock_tick_start(int cpu)
-{
- struct sched_clock_data *scd = cpu_sdc(cpu);
-
- scd->check_max = 1;
-}
-
-static int check_max(struct sched_clock_data *scd)
-{
- return scd->check_max;
-}
-#else
-static int check_max(struct sched_clock_data *scd)
-{
- return 1;
-}
-#endif /* CONFIG_NO_HZ */
-
/*
* update the percpu scd from the raw @now value
*
* - filter out backward motion
* - use jiffies to generate a min,max window to clip the raw values
*/
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
+static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
unsigned long now_jiffies = jiffies;
long delta_jiffies = now_jiffies - scd->tick_jiffies;
u64 clock = scd->clock;
u64 min_clock, max_clock;
- s64 delta = now - scd->prev_raw;
+ s64 delta = now - scd->tick_raw;
WARN_ON_ONCE(!irqs_disabled());
-
- /*
- * At schedule tick the clock can be just under the gtod. We don't
- * want to push it too prematurely.
- */
- min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
- if (min_clock > TICK_NSEC)
- min_clock -= TICK_NSEC / 2;
+ min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
if (unlikely(delta < 0)) {
clock++;
goto out;
}
- /*
- * The clock must stay within a jiffie of the gtod.
- * But since we may be at the start of a jiffy or the end of one
- * we add another jiffy buffer.
- */
- max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
-
- delta *= scd->multi;
- delta >>= MULTI_SHIFT;
+ max_clock = min_clock + TICK_NSEC;
- if (unlikely(clock + delta > max_clock) && check_max(scd)) {
+ if (unlikely(clock + delta > max_clock)) {
if (clock < max_clock)
clock = max_clock;
else
@@ -180,12 +128,10 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim
if (unlikely(clock < min_clock))
clock = min_clock;
- if (time)
- *time = clock;
- else {
- scd->prev_raw = now;
- scd->clock = clock;
- }
+ scd->tick_jiffies = now_jiffies;
+ scd->clock = clock;
+
+ return clock;
}
static void lock_double_clock(struct sched_clock_data *data1,
@@ -203,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
u64 sched_clock_cpu(int cpu)
{
struct sched_clock_data *scd = cpu_sdc(cpu);
- u64 now, clock;
+ u64 now, clock, this_clock, remote_clock;
if (unlikely(!sched_clock_running))
return 0ull;
@@ -212,43 +158,44 @@ u64 sched_clock_cpu(int cpu)
now = sched_clock();
if (cpu != raw_smp_processor_id()) {
- /*
- * in order to update a remote cpu's clock based on our
- * unstable raw time rebase it against:
- * tick_raw (offset between raw counters)
- * tick_gotd (tick offset between cpus)
- */
struct sched_clock_data *my_scd = this_scd();
lock_double_clock(scd, my_scd);
- now -= my_scd->tick_raw;
- now += scd->tick_raw;
+ this_clock = __update_sched_clock(my_scd, now);
+ remote_clock = scd->clock;
- now += my_scd->tick_gtod;
- now -= scd->tick_gtod;
+ /*
+ * Use the opportunity that we have both locks
+ * taken to couple the two clocks: we take the
+ * larger time as the latest time for both
+ * runqueues. (this creates monotonic movement)
+ */
+ if (likely(remote_clock < this_clock)) {
+ clock = this_clock;
+ scd->clock = clock;
+ } else {
+ /*
+ * Should be rare, but possible:
+ */
+ clock = remote_clock;
+ my_scd->clock = remote_clock;
+ }
__raw_spin_unlock(&my_scd->lock);
-
- __update_sched_clock(scd, now, &clock);
-
- __raw_spin_unlock(&scd->lock);
-
} else {
__raw_spin_lock(&scd->lock);
- __update_sched_clock(scd, now, NULL);
- clock = scd->clock;
- __raw_spin_unlock(&scd->lock);
+ clock = __update_sched_clock(scd, now);
}
+ __raw_spin_unlock(&scd->lock);
+
return clock;
}
void sched_clock_tick(void)
{
struct sched_clock_data *scd = this_scd();
- unsigned long now_jiffies = jiffies;
- s64 mult, delta_gtod, delta_raw;
u64 now, now_gtod;
if (unlikely(!sched_clock_running))
@@ -260,29 +207,14 @@ void sched_clock_tick(void)
now = sched_clock();
__raw_spin_lock(&scd->lock);
- __update_sched_clock(scd, now, NULL);
+ __update_sched_clock(scd, now);
/*
* update tick_gtod after __update_sched_clock() because that will
* already observe 1 new jiffy; adding a new tick_gtod to that would
* increase the clock 2 jiffies.
*/
- delta_gtod = now_gtod - scd->tick_gtod;
- delta_raw = now - scd->tick_raw;
-
- if ((long)delta_raw > 0) {
- mult = delta_gtod << MULTI_SHIFT;
- do_div(mult, delta_raw);
- scd->multi = mult;
- if (scd->multi > MAX_MULTI)
- scd->multi = MAX_MULTI;
- else if (scd->multi < MIN_MULTI)
- scd->multi = MIN_MULTI;
- } else
- scd->multi = 1 << MULTI_SHIFT;
-
scd->tick_raw = now;
scd->tick_gtod = now_gtod;
- scd->tick_jiffies = now_jiffies;
__raw_spin_unlock(&scd->lock);
}
@@ -301,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
struct sched_clock_data *scd = this_scd();
- u64 now = sched_clock();
/*
* Override the previous timestamp and ignore all
@@ -310,27 +241,30 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
* rq clock:
*/
__raw_spin_lock(&scd->lock);
- scd->prev_raw = now;
scd->clock += delta_ns;
- scd->multi = 1 << MULTI_SHIFT;
__raw_spin_unlock(&scd->lock);
touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
-#endif
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
+void sched_clock_init(void)
{
- return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+ sched_clock_running = 1;
}
+u64 sched_clock_cpu(int cpu)
+{
+ if (unlikely(!sched_clock_running))
+ return 0;
+
+ return sched_clock();
+}
+
+#endif
+
unsigned long long cpu_clock(int cpu)
{
unsigned long long clock;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index cf2cd6ce4cb2..fb8994c6d4bb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -899,7 +899,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
* doesn't make sense. Rely on vruntime for fairness.
*/
if (rq->curr != p)
- delta = max(10000LL, delta);
+ delta = max_t(s64, 10000LL, delta);
hrtick_start(rq, delta);
}
@@ -1442,18 +1442,23 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
struct task_struct *p = NULL;
struct sched_entity *se;
- while (next != &cfs_rq->tasks) {
+ if (next == &cfs_rq->tasks)
+ return NULL;
+
+ /* Skip over entities that are not tasks */
+ do {
se = list_entry(next, struct sched_entity, group_node);
next = next->next;
+ } while (next != &cfs_rq->tasks && !entity_is_task(se));
- /* Skip over entities that are not tasks */
- if (entity_is_task(se)) {
- p = task_of(se);
- break;
- }
- }
+ if (next == &cfs_rq->tasks)
+ return NULL;
cfs_rq->balance_iterator = next;
+
+ if (entity_is_task(se))
+ p = task_of(se);
+
return p;
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f9dad0..998ba54b4543 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -298,7 +298,7 @@ static void __disable_runtime(struct rq *rq)
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
- if (iter == rt_rq)
+ if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
spin_lock(&iter->rt_runtime_lock);
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
#define RT_MAX_TRIES 3
static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
break;
/* try again */
- spin_unlock(&lowest_rq->lock);
+ double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
}
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
resched_task(lowest_rq->curr);
- spin_unlock(&lowest_rq->lock);
+ double_unlock_balance(rq, lowest_rq);
ret = 1;
out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
}
skip:
- spin_unlock(&src_rq->lock);
+ double_unlock_balance(this_rq, src_rq);
}
return ret;
diff --git a/kernel/signal.c b/kernel/signal.c
index 954f77d7e3bc..c539f60c6f41 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1304,6 +1304,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
q->info.si_overrun++;
goto out;
}
+ q->info.si_overrun = 0;
signalfd_notify(t, sig);
pending = group ? &t->signal->shared_pending : &t->pending;
diff --git a/kernel/smp.c b/kernel/smp.c
index 96fc7c0edc59..782e2b93e465 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -135,7 +135,8 @@ void generic_smp_call_function_interrupt(void)
*/
smp_wmb();
data->csd.flags &= ~CSD_FLAG_WAIT;
- } else
+ }
+ if (data->csd.flags & CSD_FLAG_ALLOC)
call_rcu(&data->rcu_head, rcu_free_call_data);
}
rcu_read_unlock();
@@ -260,6 +261,42 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
generic_exec_single(cpu, data);
}
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+ struct call_single_data data;
+ int cpu;
+
+ data.func = quiesce_dummy;
+ data.info = NULL;
+
+ for_each_cpu_mask(cpu, mask) {
+ data.flags = CSD_FLAG_WAIT;
+ generic_exec_single(cpu, &data);
+ }
+}
+
/**
* smp_call_function_mask(): Run a function on a set of other CPUs.
* @mask: The set of cpus to run on.
@@ -285,6 +322,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
cpumask_t allbutself;
unsigned long flags;
int cpu, num_cpus;
+ int slowpath = 0;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
@@ -306,15 +344,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
return smp_call_function_single(cpu, func, info, wait);
}
- if (!wait) {
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
- if (data)
- data->csd.flags = CSD_FLAG_ALLOC;
- }
- if (!data) {
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+ if (data) {
+ data->csd.flags = CSD_FLAG_ALLOC;
+ if (wait)
+ data->csd.flags |= CSD_FLAG_WAIT;
+ } else {
data = &d;
data->csd.flags = CSD_FLAG_WAIT;
wait = 1;
+ slowpath = 1;
}
spin_lock_init(&data->lock);
@@ -331,8 +370,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
arch_send_call_function_ipi(mask);
/* optionally wait for the CPUs to complete */
- if (wait)
+ if (wait) {
csd_flag_wait(&data->csd);
+ if (unlikely(slowpath))
+ smp_call_function_mask_quiesce_stack(mask);
+ }
return 0;
}
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index a1fb54c93cdd..29ab20749dd3 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -290,8 +290,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-
EXPORT_SYMBOL(_spin_lock_nested);
+
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
unsigned long flags;
@@ -311,9 +311,17 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
#endif
return flags;
}
-
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+ struct lockdep_map *nest_lock)
+{
+ preempt_disable();
+ spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
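spin_acquire_nest() ties the acquired spinlock to a designated outer lock: as long as that outer lock is held, lockdep accepts any number of same-class inner locks. mm_take_all_locks() below relies on this with mmap_sem; a minimal sketch of the same pattern (hypothetical helper, caller already holds mm->mmap_sem):

	static void lock_child(struct mm_struct *mm, spinlock_t *child_lock)
	{
		/* mm->mmap_sem must already be held by the caller */
		spin_lock_nest_lock(child_lock, &mm->mmap_sem);
	}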
#endif
void __lockfunc _spin_unlock(spinlock_t *lock)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e446c7c7d6a9..af3c7cea258b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -65,7 +65,6 @@ static void ack_state(void)
static int stop_cpu(struct stop_machine_data *smdata)
{
enum stopmachine_state curstate = STOPMACHINE_NONE;
- int uninitialized_var(ret);
/* Simple state machine */
do {
diff --git a/kernel/sys.c b/kernel/sys.c
index c01858090a98..3dacb00a7f76 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -274,7 +274,7 @@ void emergency_restart(void)
}
EXPORT_SYMBOL_GPL(emergency_restart);
-static void kernel_restart_prepare(char *cmd)
+void kernel_restart_prepare(char *cmd)
{
blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 825b4c00fe44..f5da526424a9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle)
ts->tick_stopped = 1;
ts->idle_jiffies = last_jiffies;
rcu_enter_nohz();
- sched_clock_tick_stop(cpu);
}
/*
@@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void)
select_nohz_load_balancer(0);
now = ktime_get();
tick_do_update_jiffies64(now);
- sched_clock_tick_start(cpu);
cpu_clear(cpu, nohz_cpu_mask);
/*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4a26a1382df0..4048e92aa04f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
BUG_ON(get_wq_data(work) != cwq);
work_clear_pending(work);
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
f(work);
- lock_release(&lockdep_map, 1, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ lock_map_release(&lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
int cpu;
might_sleep();
- lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+ lock_map_acquire(&wq->lockdep_map);
+ lock_map_release(&wq->lockdep_map);
for_each_cpu_mask_nr(cpu, *cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
if (!cwq)
return 0;
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
prev = NULL;
spin_lock_irq(&cwq->lock);
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
might_sleep();
- lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&work->lockdep_map, 1, _THIS_IP_);
+ lock_map_acquire(&work->lockdep_map);
+ lock_map_release(&work->lockdep_map);
cwq = get_wq_data(work);
if (!cwq)
@@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
if (cwq->thread == NULL)
return;
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
flush_cpu_workqueue(cwq);
/*
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e1d4764435ed..8b5a7d304a5f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -693,6 +693,14 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
+config SYSCTL_SYSCALL_CHECK
+ bool "Sysctl checks"
+ depends on SYSCTL_SYSCALL
+ ---help---
+ sys_sysctl uses binary paths that have been found challenging
+ to properly maintain and use. This enables checks that help
+ you to keep things correct.
+
source kernel/trace/Kconfig
config PROVIDE_OHCI1394_DMA_INIT
@@ -735,6 +743,15 @@ config FIREWIRE_OHCI_REMOTE_DMA
If unsure, say N.
+menuconfig BUILD_DOCSRC
+ bool "Build targets in Documentation/ tree"
+ depends on HEADERS_CHECK
+ help
+ This option attempts to build objects from the source files in the
+ kernel Documentation/ tree.
+
+ Say N if you are unsure.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 482df94ea21e..06fb57c86de0 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,17 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
EXPORT_SYMBOL(bitmap_scnprintf);
/**
+ * bitmap_scnprintf_len - return buffer length needed to convert
+ * bitmap to an ASCII hex string
+ * @nr_bits: number of bits to be converted
+ */
+int bitmap_scnprintf_len(unsigned int nr_bits)
+{
+ unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4;
+ return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1;
+}
+
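A quick worked example of the formula, assuming CHUNKSZ is 32 as in lib/bitmap.c (i.e. bitmap_scnprintf() emits 8 hex digits per comma-separated chunk):

	/*
	 * nr_bits = 64:  nr_nibbles = ALIGN(64, 4) / 4 = 16
	 *                separators = ALIGN(16, 8) / 8 - 1 = 1
	 * so bitmap_scnprintf_len(64) = 17   ("ffffffff,ffffffff")
	 */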
+/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 0ef01d14727c..0218b4693dd8 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -8,6 +8,7 @@
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
+#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -37,6 +38,7 @@ int debug_locks_off(void)
{
if (xchg(&debug_locks, 0)) {
if (!debug_locks_silent) {
+ oops_in_progress = 1;
console_verbose();
return 1;
}
diff --git a/lib/lmb.c b/lib/lmb.c
index 5d7b9286503e..97e547037084 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -462,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit)
if (lmb.memory.region[0].size < lmb.rmo_size)
lmb.rmo_size = lmb.memory.region[0].size;
+ memory_limit = lmb_end_of_DRAM();
+
/* And truncate any reserves above the limit also. */
for (i = 0; i < lmb.reserved.cnt; i++) {
p = &lmb.reserved.region[i];
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 1dc2d1d18fa8..d8d1d1142248 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -220,7 +220,7 @@ int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\
if (len == 0) \
return -EINVAL; \
\
- val = simple_strtoul(cp, &tail, base); \
+ val = simple_strtou##type(cp, &tail, base); \
if ((*tail == '\0') || \
((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\
*res = val; \
diff --git a/mm/Kconfig b/mm/Kconfig
index 446c6588c753..0bd9c2dbb2a0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -77,9 +77,6 @@ config FLAT_NODE_MEM_MAP
def_bool y
depends on !SPARSEMEM
-config HAVE_GET_USER_PAGES_FAST
- bool
-
#
# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
# to represent different areas of memory. This variable allows
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 4af15d0340ad..e023c68b0255 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -473,7 +473,7 @@ find_block:
goto find_block;
}
- if (bdata->last_end_off &&
+ if (bdata->last_end_off & (PAGE_SIZE - 1) &&
PFN_DOWN(bdata->last_end_off) + 1 == sidx)
start_off = ALIGN(bdata->last_end_off, align);
else
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 757ca983fd99..67a71191136e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -565,7 +565,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
huge_page_order(h));
if (page) {
if (arch_prepare_hugepage(page)) {
- __free_pages(page, HUGETLB_PAGE_ORDER);
+ __free_pages(page, huge_page_order(h));
return NULL;
}
prep_new_huge_page(h, page, nid);
@@ -665,6 +665,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
+ if (page && arch_prepare_hugepage(page)) {
+ __free_pages(page, huge_page_order(h));
+ return NULL;
+ }
+
spin_lock(&hugetlb_lock);
if (page) {
/*
@@ -1937,6 +1942,18 @@ retry:
lock_page(page);
}
+ /*
+ * If we are going to COW a private mapping later, we examine the
+ * pending reservations for this page now. This will ensure that
+ * any allocations necessary to record that reservation occur outside
+ * the spinlock.
+ */
+ if (write_access && !(vma->vm_flags & VM_SHARED))
+ if (vma_needs_reservation(h, vma, address) < 0) {
+ ret = VM_FAULT_OOM;
+ goto backout_unlocked;
+ }
+
spin_lock(&mm->page_table_lock);
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
@@ -1962,6 +1979,7 @@ out:
backout:
spin_unlock(&mm->page_table_lock);
+backout_unlocked:
unlock_page(page);
put_page(page);
goto out;
@@ -1973,6 +1991,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *ptep;
pte_t entry;
int ret;
+ struct page *pagecache_page = NULL;
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
@@ -1989,25 +2008,44 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
- mutex_unlock(&hugetlb_instantiation_mutex);
- return ret;
+ goto out_unlock;
}
ret = 0;
+ /*
+ * If we are going to COW the mapping later, we examine the pending
+ * reservations for this page now. This will ensure that any
+ * allocations necessary to record that reservation occur outside the
+ * spinlock. For private mappings, we also lookup the pagecache
+ * page now as it is used to determine if a reservation has been
+ * consumed.
+ */
+ if (write_access && !pte_write(entry)) {
+ if (vma_needs_reservation(h, vma, address) < 0) {
+ ret = VM_FAULT_OOM;
+ goto out_unlock;
+ }
+
+ if (!(vma->vm_flags & VM_SHARED))
+ pagecache_page = hugetlbfs_pagecache_page(h,
+ vma, address);
+ }
+
spin_lock(&mm->page_table_lock);
/* Check for a racing update before calling hugetlb_cow */
if (likely(pte_same(entry, huge_ptep_get(ptep))))
- if (write_access && !pte_write(entry)) {
- struct page *page;
- page = hugetlbfs_pagecache_page(h, vma, address);
- ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
- if (page) {
- unlock_page(page);
- put_page(page);
- }
- }
+ if (write_access && !pte_write(entry))
+ ret = hugetlb_cow(mm, vma, address, ptep, entry,
+ pagecache_page);
spin_unlock(&mm->page_table_lock);
+
+ if (pagecache_page) {
+ unlock_page(pagecache_page);
+ put_page(pagecache_page);
+ }
+
+out_unlock:
mutex_unlock(&hugetlb_instantiation_mutex);
return ret;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7056c3bdb478..0f1f7a7374ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -796,6 +796,8 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
if (mem_cgroup_subsys.disabled)
return 0;
+ if (!mm)
+ return 0;
rcu_read_lock();
mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e550bec20582..83369058ec13 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -803,7 +803,6 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
- LIST_HEAD(pagelist);
int busy = 0;
int err = 0;
nodemask_t tmp;
diff --git a/mm/mmap.c b/mm/mmap.c
index 971d0eda754a..339cf5c4d5d8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2273,14 +2273,14 @@ int install_special_mapping(struct mm_struct *mm,
static DEFINE_MUTEX(mm_all_locks_mutex);
-static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
/*
* The LSB of head.next can't change from under us
* because we hold the mm_all_locks_mutex.
*/
- spin_lock(&anon_vma->lock);
+ spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
/*
* We can safely modify head.next after taking the
* anon_vma->lock. If some other vma in this mm shares
@@ -2296,7 +2296,7 @@ static void vm_lock_anon_vma(struct anon_vma *anon_vma)
}
}
-static void vm_lock_mapping(struct address_space *mapping)
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
/*
@@ -2310,7 +2310,7 @@ static void vm_lock_mapping(struct address_space *mapping)
*/
if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
BUG();
- spin_lock(&mapping->i_mmap_lock);
+ spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
}
}
@@ -2358,11 +2358,17 @@ int mm_take_all_locks(struct mm_struct *mm)
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (signal_pending(current))
goto out_unlock;
- if (vma->anon_vma)
- vm_lock_anon_vma(vma->anon_vma);
if (vma->vm_file && vma->vm_file->f_mapping)
- vm_lock_mapping(vma->vm_file->f_mapping);
+ vm_lock_mapping(mm, vma->vm_file->f_mapping);
+ }
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (signal_pending(current))
+ goto out_unlock;
+ if (vma->anon_vma)
+ vm_lock_anon_vma(mm, vma->anon_vma);
}
+
ret = 0;
out_unlock:
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 8a5467ee6265..64e5b4bcd964 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
+#include <linux/security.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
@@ -128,7 +129,8 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* Superuser processes are usually more important, so we make it
* less likely that we kill those.
*/
- if (__capable(p, CAP_SYS_ADMIN) || __capable(p, CAP_SYS_RESOURCE))
+ if (has_capability(p, CAP_SYS_ADMIN) ||
+ has_capability(p, CAP_SYS_RESOURCE))
points /= 4;
/*
@@ -137,7 +139,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* tend to only have this flag set on applications they think
* of as important.
*/
- if (__capable(p, CAP_SYS_RAWIO))
+ if (has_capability(p, CAP_SYS_RAWIO))
points /= 4;
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 401d104d2bb6..af982f7cdb2a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4437,7 +4437,7 @@ void *__init alloc_large_system_hash(const char *tablename,
do {
size = bucketsize << log2qty;
if (flags & HASH_EARLY)
- table = alloc_bootmem(size);
+ table = alloc_bootmem_nopanic(size);
else if (hashdist)
table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
else {
diff --git a/mm/sparse.c b/mm/sparse.c
index 5d9dbbb9d39e..39db301b920d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -12,7 +12,6 @@
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include "internal.h"
/*
* Permanent SPARSEMEM data:
diff --git a/mm/util.c b/mm/util.c
index 9341ca77bd88..cb00b748ce47 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -171,3 +171,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
mm->unmap_area = arch_unmap_area;
}
#endif
+
+int __attribute__((weak)) get_user_pages_fast(unsigned long start,
+ int nr_pages, int write, struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start, nr_pages,
+ write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(get_user_pages_fast);
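The weak default above simply takes mmap_sem and calls get_user_pages(); architectures with a lockless fast path can override it. A hypothetical caller sketch pinning one writable user page:

	static int pin_one_page(unsigned long uaddr, struct page **page)
	{
		int ret;

		ret = get_user_pages_fast(uaddr, 1, 1, page);	/* one page, for write */
		if (ret != 1)
			return ret < 0 ? ret : -EFAULT;
		/* ... touch the pinned page ... */
		put_page(*page);
		return 0;
	}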
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 57abe8266be1..a89f32fa94f6 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -99,7 +99,7 @@ struct gen_estimator_head
static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
-/* Protects against NULL dereference */
+/* Protects against NULL dereference and RCU write-side */
static DEFINE_RWLOCK(est_lock);
static void est_timer(unsigned long arg)
@@ -185,6 +185,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
+ write_lock_bh(&est_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
@@ -194,6 +195,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
list_add_rcu(&est->list, &elist[idx].list);
+ write_unlock_bh(&est_lock);
return 0;
}
@@ -212,7 +214,6 @@ static void __gen_kill_estimator(struct rcu_head *head)
* Removes the rate estimator specified by &bstats and &rate_est
* and deletes the timer.
*
- * NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic *bstats,
struct gnet_stats_rate_est *rate_est)
@@ -226,17 +227,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
if (!elist[idx].timer.function)
continue;
+ write_lock_bh(&est_lock);
list_for_each_entry_safe(e, n, &elist[idx].list, list) {
if (e->rate_est != rate_est || e->bstats != bstats)
continue;
- write_lock_bh(&est_lock);
e->bstats = NULL;
- write_unlock_bh(&est_lock);
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
}
+ write_unlock_bh(&est_lock);
}
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 526236453908..a756847e3814 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1961,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
*/
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
+ int ntxq;
+
if (!pkt_dev->odev) {
printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
"setup_inject.\n");
@@ -1969,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
return;
}
+ /* make sure that we don't pick a non-existing transmit queue */
+ ntxq = pkt_dev->odev->real_num_tx_queues;
+ if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+ printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+ "disabled because CPU count (%d) exceeds number ",
+ num_online_cpus());
+ printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+ "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+ pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+ }
+ if (ntxq <= pkt_dev->queue_map_min) {
+ printk(KERN_WARNING "pktgen: WARNING: Requested "
+ "queue_map_min (%d) exceeds number of tx\n",
+ pkt_dev->queue_map_min);
+ printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+ "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ pkt_dev->queue_map_min = ntxq - 1;
+ }
+ if (ntxq <= pkt_dev->queue_map_max) {
+ printk(KERN_WARNING "pktgen: WARNING: Requested "
+ "queue_map_max (%d) exceeds number of tx\n",
+ pkt_dev->queue_map_max);
+ printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+ "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ pkt_dev->queue_map_max = ntxq - 1;
+ }
+
/* Default to the interface's mac if not explicitly set. */
if (is_zero_ether_addr(pkt_dev->src_mac))
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b622d9744856..1ca3b26eed0f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -474,6 +474,11 @@ static int dccp_setsockopt_change(struct sock *sk, int type,
if (copy_from_user(&opt, optval, sizeof(opt)))
return -EFAULT;
+ /*
+ * rfc4340: 6.1. Change Options
+ */
+ if (opt.dccpsf_len < 1)
+ return -EINVAL;
val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
if (!val)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6203ece53606..f70fac612596 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -289,6 +289,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
struct rtable *rt;
struct iphdr *pip;
struct igmpv3_report *pig;
+ struct net *net = dev_net(dev);
skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
@@ -299,7 +300,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
.nl_u = { .ip4_u = {
.daddr = IGMPV3_ALL_MCR } },
.proto = IPPROTO_IGMP };
- if (ip_route_output_key(&init_net, &rt, &fl)) {
+ if (ip_route_output_key(net, &rt, &fl)) {
kfree_skb(skb);
return NULL;
}
@@ -629,6 +630,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
struct igmphdr *ih;
struct rtable *rt;
struct net_device *dev = in_dev->dev;
+ struct net *net = dev_net(dev);
__be32 group = pmc ? pmc->multiaddr : 0;
__be32 dst;
@@ -643,7 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
struct flowi fl = { .oif = dev->ifindex,
.nl_u = { .ip4_u = { .daddr = dst } },
.proto = IPPROTO_IGMP };
- if (ip_route_output_key(&init_net, &rt, &fl))
+ if (ip_route_output_key(net, &rt, &fl))
return -1;
}
if (rt->rt_src == 0) {
@@ -1196,9 +1198,6 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
for (im=in_dev->mc_list; im; im=im->next) {
if (im->multiaddr == addr) {
im->users++;
@@ -1278,9 +1277,6 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
if (i->multiaddr==addr) {
if (--i->users == 0) {
@@ -1308,9 +1304,6 @@ void ip_mc_down(struct in_device *in_dev)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
for (i=in_dev->mc_list; i; i=i->next)
igmp_group_dropped(i);
@@ -1331,9 +1324,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
{
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
in_dev->mc_tomb = NULL;
#ifdef CONFIG_IP_MULTICAST
in_dev->mr_gq_running = 0;
@@ -1357,9 +1347,6 @@ void ip_mc_up(struct in_device *in_dev)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
for (i=in_dev->mc_list; i; i=i->next)
@@ -1376,9 +1363,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
/* Deactivate timers */
ip_mc_down(in_dev);
@@ -1395,7 +1379,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
write_unlock_bh(&in_dev->mc_list_lock);
}
-static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
+static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = imr->imr_multiaddr.s_addr } } };
@@ -1404,19 +1388,19 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
struct in_device *idev = NULL;
if (imr->imr_ifindex) {
- idev = inetdev_by_index(&init_net, imr->imr_ifindex);
+ idev = inetdev_by_index(net, imr->imr_ifindex);
if (idev)
__in_dev_put(idev);
return idev;
}
if (imr->imr_address.s_addr) {
- dev = ip_dev_find(&init_net, imr->imr_address.s_addr);
+ dev = ip_dev_find(net, imr->imr_address.s_addr);
if (!dev)
return NULL;
dev_put(dev);
}
- if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) {
+ if (!dev && !ip_route_output_key(net, &rt, &fl)) {
dev = rt->u.dst.dev;
ip_rt_put(rt);
}
@@ -1754,18 +1738,16 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
struct ip_mc_socklist *iml=NULL, *i;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
+ struct net *net = sock_net(sk);
int ifindex;
int count = 0;
if (!ipv4_is_multicast(addr))
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
- in_dev = ip_mc_find_dev(imr);
+ in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
iml = NULL;
@@ -1827,15 +1809,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml, **imlp;
struct in_device *in_dev;
+ struct net *net = sock_net(sk);
__be32 group = imr->imr_multiaddr.s_addr;
u32 ifindex;
int ret = -EADDRNOTAVAIL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
- in_dev = ip_mc_find_dev(imr);
+ in_dev = ip_mc_find_dev(net, imr);
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
if (iml->multi.imr_multiaddr.s_addr != group)
@@ -1873,21 +1853,19 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
struct in_device *in_dev = NULL;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
+ struct net *net = sock_net(sk);
int leavegroup = 0;
int i, j, rv;
if (!ipv4_is_multicast(addr))
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
imr.imr_address.s_addr = mreqs->imr_interface;
imr.imr_ifindex = ifindex;
- in_dev = ip_mc_find_dev(&imr);
+ in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
@@ -2007,6 +1985,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *newpsl, *psl;
+ struct net *net = sock_net(sk);
int leavegroup = 0;
if (!ipv4_is_multicast(addr))
@@ -2015,15 +1994,12 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
msf->imsf_fmode != MCAST_EXCLUDE)
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = ifindex;
- in_dev = ip_mc_find_dev(&imr);
+ in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
@@ -2094,19 +2070,17 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
+ struct net *net = sock_net(sk);
if (!ipv4_is_multicast(addr))
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = 0;
- in_dev = ip_mc_find_dev(&imr);
+ in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
@@ -2163,9 +2137,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
if (!ipv4_is_multicast(addr))
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
err = -EADDRNOTAVAIL;
@@ -2246,19 +2217,17 @@ void ip_mc_drop_socket(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml;
+ struct net *net = sock_net(sk);
if (inet->mc_list == NULL)
return;
- if (!net_eq(sock_net(sk), &init_net))
- return;
-
rtnl_lock();
while ((iml = inet->mc_list) != NULL) {
struct in_device *in_dev;
inet->mc_list = iml->next;
- in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex);
+ in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
if (in_dev != NULL) {
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
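
[Note] The igmp.c hunks above replace every hard-coded &init_net lookup with the namespace the socket actually belongs to, which is also why the per-function net_eq(sock_net(sk), &init_net) guards can be deleted. A minimal sketch of the pattern, outside the patch and with a hypothetical helper name:

	/* Sketch only: resolve a device in the socket's own namespace.
	 * my_mc_find_netdev() is a hypothetical name, not part of the patch. */
	static struct net_device *my_mc_find_netdev(struct sock *sk, __be32 addr)
	{
		struct net *net = sock_net(sk);		/* per-socket namespace */
		struct net_device *dev;

		dev = ip_dev_find(net, addr);		/* takes a reference on success */
		if (dev)
			dev_put(dev);			/* caller only needs the pointer */
		return dev;
	}

Once every lookup is keyed on sock_net(sk), sockets in non-initial namespaces no longer have to be rejected with -EPROTONOSUPPORT.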
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 1f1897a1a702..201b8ea3020d 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -608,7 +608,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
}
-int ip_vs_app_init(void)
+int __init ip_vs_app_init(void)
{
/* we will replace it with proc_net_ipvs_create() soon */
proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index f8bdae47a77f..44a6872dc245 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -965,7 +965,7 @@ static void ip_vs_conn_flush(void)
}
-int ip_vs_conn_init(void)
+int __init ip_vs_conn_init(void)
{
int idx;
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 9a5ace0b4dd6..6379705a8dcb 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -683,9 +683,22 @@ static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
spin_lock_bh(&stats->lock);
- memset(stats, 0, (char *)&stats->lock - (char *)stats);
- spin_unlock_bh(&stats->lock);
+
+ stats->conns = 0;
+ stats->inpkts = 0;
+ stats->outpkts = 0;
+ stats->inbytes = 0;
+ stats->outbytes = 0;
+
+ stats->cps = 0;
+ stats->inpps = 0;
+ stats->outpps = 0;
+ stats->inbps = 0;
+ stats->outbps = 0;
+
ip_vs_zero_estimator(stats);
+
+ spin_unlock_bh(&stats->lock);
}
/*
@@ -1589,7 +1602,7 @@ static struct ctl_table vs_vars[] = {
{ .ctl_name = 0 }
};
-struct ctl_path net_vs_ctl_path[] = {
+const struct ctl_path net_vs_ctl_path[] = {
{ .procname = "net", .ctl_name = CTL_NET, },
{ .procname = "ipv4", .ctl_name = NET_IPV4, },
{ .procname = "vs", },
@@ -1784,7 +1797,9 @@ static const struct file_operations ip_vs_info_fops = {
#endif
-struct ip_vs_stats ip_vs_stats;
+struct ip_vs_stats ip_vs_stats = {
+ .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
+};
#ifdef CONFIG_PROC_FS
static int ip_vs_stats_show(struct seq_file *seq, void *v)
@@ -2306,7 +2321,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
};
-int ip_vs_control_init(void)
+int __init ip_vs_control_init(void)
{
int ret;
int idx;
@@ -2333,8 +2348,6 @@ int ip_vs_control_init(void)
INIT_LIST_HEAD(&ip_vs_rtable[idx]);
}
- memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
- spin_lock_init(&ip_vs_stats.lock);
ip_vs_new_estimator(&ip_vs_stats);
/* Hook the defense timer */
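
[Note] The ip_vs_zero_stats()/ip_vs_control_init() hunks stop memset()ing a structure that embeds its own spinlock: the lock is now initialized statically and the counters are cleared field by field while holding it. A small sketch of that shape, with hypothetical names:

	#include <linux/spinlock.h>

	/* Hypothetical stats object; mirrors the shape used above. */
	struct my_stats {
		unsigned int conns;
		unsigned int inpkts;
		spinlock_t lock;		/* never wiped by memset() */
	};

	static struct my_stats my_stats = {
		.lock = __SPIN_LOCK_UNLOCKED(my_stats.lock),
	};

	static void my_zero_stats(struct my_stats *s)
	{
		spin_lock_bh(&s->lock);
		s->conns  = 0;			/* clear each counter explicitly */
		s->inpkts = 0;
		spin_unlock_bh(&s->lock);
	}

Clearing field by field keeps the lock (and any debug state behind it) intact, where the old memset() only avoided it by careful offset arithmetic up to &stats->lock.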
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index 8afc1503ed20..fa66824d264f 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -233,6 +233,7 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
.name = "dh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
.init_service = ip_vs_dh_init_svc,
.done_service = ip_vs_dh_done_svc,
.update_service = ip_vs_dh_update_svc,
@@ -242,7 +243,6 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
static int __init ip_vs_dh_init(void)
{
- INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index bc04eedd6dbb..5a20f93bd7f9 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
+#include <linux/list.h>
#include <net/ip_vs.h>
@@ -44,28 +45,11 @@
*/
-struct ip_vs_estimator
-{
- struct ip_vs_estimator *next;
- struct ip_vs_stats *stats;
-
- u32 last_conns;
- u32 last_inpkts;
- u32 last_outpkts;
- u64 last_inbytes;
- u64 last_outbytes;
-
- u32 cps;
- u32 inpps;
- u32 outpps;
- u32 inbps;
- u32 outbps;
-};
-
+static void estimation_timer(unsigned long arg);
-static struct ip_vs_estimator *est_list = NULL;
-static DEFINE_RWLOCK(est_lock);
-static struct timer_list est_timer;
+static LIST_HEAD(est_list);
+static DEFINE_SPINLOCK(est_lock);
+static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
static void estimation_timer(unsigned long arg)
{
@@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg)
u64 n_inbytes, n_outbytes;
u32 rate;
- read_lock(&est_lock);
- for (e = est_list; e; e = e->next) {
- s = e->stats;
+ spin_lock(&est_lock);
+ list_for_each_entry(e, &est_list, list) {
+ s = container_of(e, struct ip_vs_stats, est);
spin_lock(&s->lock);
n_conns = s->conns;
@@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg)
s->outbps = (e->outbps+0xF)>>5;
spin_unlock(&s->lock);
}
- read_unlock(&est_lock);
+ spin_unlock(&est_lock);
mod_timer(&est_timer, jiffies + 2*HZ);
}
-int ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct ip_vs_stats *stats)
{
- struct ip_vs_estimator *est;
+ struct ip_vs_estimator *est = &stats->est;
- est = kzalloc(sizeof(*est), GFP_KERNEL);
- if (est == NULL)
- return -ENOMEM;
+ INIT_LIST_HEAD(&est->list);
- est->stats = stats;
est->last_conns = stats->conns;
est->cps = stats->cps<<10;
@@ -142,59 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
est->last_outbytes = stats->outbytes;
est->outbps = stats->outbps<<5;
- write_lock_bh(&est_lock);
- est->next = est_list;
- if (est->next == NULL) {
- setup_timer(&est_timer, estimation_timer, 0);
- est_timer.expires = jiffies + 2*HZ;
- add_timer(&est_timer);
- }
- est_list = est;
- write_unlock_bh(&est_lock);
- return 0;
+ spin_lock_bh(&est_lock);
+ if (list_empty(&est_list))
+ mod_timer(&est_timer, jiffies + 2 * HZ);
+ list_add(&est->list, &est_list);
+ spin_unlock_bh(&est_lock);
}
void ip_vs_kill_estimator(struct ip_vs_stats *stats)
{
- struct ip_vs_estimator *est, **pest;
- int killed = 0;
-
- write_lock_bh(&est_lock);
- pest = &est_list;
- while ((est=*pest) != NULL) {
- if (est->stats != stats) {
- pest = &est->next;
- continue;
- }
- *pest = est->next;
- kfree(est);
- killed++;
+ struct ip_vs_estimator *est = &stats->est;
+
+ spin_lock_bh(&est_lock);
+ list_del(&est->list);
+ while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
+ spin_unlock_bh(&est_lock);
+ cpu_relax();
+ spin_lock_bh(&est_lock);
}
- if (killed && est_list == NULL)
- del_timer_sync(&est_timer);
- write_unlock_bh(&est_lock);
+ spin_unlock_bh(&est_lock);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
- struct ip_vs_estimator *e;
-
- write_lock_bh(&est_lock);
- for (e = est_list; e; e = e->next) {
- if (e->stats != stats)
- continue;
-
- /* set counters zero */
- e->last_conns = 0;
- e->last_inpkts = 0;
- e->last_outpkts = 0;
- e->last_inbytes = 0;
- e->last_outbytes = 0;
- e->cps = 0;
- e->inpps = 0;
- e->outpps = 0;
- e->inbps = 0;
- e->outbps = 0;
- }
- write_unlock_bh(&est_lock);
+ struct ip_vs_estimator *est = &stats->est;
+
+ /* set counters zero, caller must hold the stats->lock lock */
+ est->last_inbytes = 0;
+ est->last_outbytes = 0;
+ est->last_conns = 0;
+ est->last_inpkts = 0;
+ est->last_outpkts = 0;
+ est->cps = 0;
+ est->inpps = 0;
+ est->outpps = 0;
+ est->inbps = 0;
+ est->outbps = 0;
}
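
[Note] The ip_vs_est.c rewrite embeds the estimator inside the stats object and strings estimators on a list_head, so the periodic timer walks the list and recovers the owning stats with container_of() instead of following a separate ->stats pointer, and ip_vs_new_estimator() no longer needs to allocate (hence the void return). A sketch of the idiom, with hypothetical names:

	#include <linux/list.h>
	#include <linux/kernel.h>

	struct my_estimator {
		struct list_head list;
		u32 last_conns;
	};

	struct my_stats {
		u32 conns;
		struct my_estimator est;	/* embedded, no kzalloc() needed */
	};

	static LIST_HEAD(my_est_list);

	static void my_estimation_pass(void)
	{
		struct my_estimator *e;

		list_for_each_entry(e, &my_est_list, list) {
			struct my_stats *s = container_of(e, struct my_stats, est);
			/* derive a rate from s->conns - e->last_conns, then ... */
			e->last_conns = s->conns;
		}
	}

The kill path above also shows why the loop uses try_to_del_timer_sync(): del_timer_sync() cannot be called while holding the spinlock the timer handler itself takes, so the code retries with the lock dropped.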
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 0efa3db4b180..7a6a319f544a 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -539,6 +539,7 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
.name = "lblc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
.init_service = ip_vs_lblc_init_svc,
.done_service = ip_vs_lblc_done_svc,
.update_service = ip_vs_lblc_update_svc,
@@ -550,7 +551,6 @@ static int __init ip_vs_lblc_init(void)
{
int ret;
- INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 8e3bbeb45138..c234e73968a6 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -728,6 +728,7 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
.name = "lblcr",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
.init_service = ip_vs_lblcr_init_svc,
.done_service = ip_vs_lblcr_done_svc,
.update_service = ip_vs_lblcr_update_svc,
@@ -739,7 +740,6 @@ static int __init ip_vs_lblcr_init(void)
{
int ret;
- INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ac9f08e065d5..ebcdbf75ac65 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -98,6 +98,7 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
.name = "lc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
.init_service = ip_vs_lc_init_svc,
.done_service = ip_vs_lc_done_svc,
.update_service = ip_vs_lc_update_svc,
@@ -107,7 +108,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
static int __init ip_vs_lc_init(void)
{
- INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
}
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index a46bf258d420..92f3a6770031 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -136,6 +136,7 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
.name = "nq",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
.init_service = ip_vs_nq_init_svc,
.done_service = ip_vs_nq_done_svc,
.update_service = ip_vs_nq_update_svc,
@@ -145,7 +146,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
static int __init ip_vs_nq_init(void)
{
- INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 876714f23d65..6099a88fc200 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -43,7 +43,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
/*
* register an ipvs protocol
*/
-static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp)
+static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
{
unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
@@ -190,7 +190,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
}
-int ip_vs_protocol_init(void)
+int __init ip_vs_protocol_init(void)
{
char protocols[64];
#define REGISTER_PROTOCOL(p) \
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index c8db12d39e61..358110d17e59 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -94,6 +94,7 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
.name = "rr", /* name */
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
.init_service = ip_vs_rr_init_svc,
.done_service = ip_vs_rr_done_svc,
.update_service = ip_vs_rr_update_svc,
@@ -102,7 +103,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
static int __init ip_vs_rr_init(void)
{
- INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index b64767309855..a46ad9e35016 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -184,7 +184,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
write_lock_bh(&__ip_vs_sched_lock);
- if (scheduler->n_list.next != &scheduler->n_list) {
+ if (!list_empty(&scheduler->n_list)) {
write_unlock_bh(&__ip_vs_sched_lock);
ip_vs_use_count_dec();
IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
@@ -229,7 +229,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
}
write_lock_bh(&__ip_vs_sched_lock);
- if (scheduler->n_list.next == &scheduler->n_list) {
+ if (list_empty(&scheduler->n_list)) {
write_unlock_bh(&__ip_vs_sched_lock);
IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
"is not in the list. failed\n", scheduler->name);
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 2a7d31358181..77663d84cbd1 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -138,6 +138,7 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
.name = "sed",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
.init_service = ip_vs_sed_init_svc,
.done_service = ip_vs_sed_done_svc,
.update_service = ip_vs_sed_update_svc,
@@ -147,7 +148,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
static int __init ip_vs_sed_init(void)
{
- INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index b8fdfac65001..7b979e228056 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -230,6 +230,7 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
.name = "sh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
.init_service = ip_vs_sh_init_svc,
.done_service = ip_vs_sh_done_svc,
.update_service = ip_vs_sh_update_svc,
@@ -239,7 +240,6 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
static int __init ip_vs_sh_init(void)
{
- INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 45e9bd96c286..a652da2c3200 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -904,9 +904,9 @@ int stop_sync_thread(int state)
* progress of stopping the master sync daemon.
*/
- spin_lock(&ip_vs_sync_lock);
+ spin_lock_bh(&ip_vs_sync_lock);
ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
- spin_unlock(&ip_vs_sync_lock);
+ spin_unlock_bh(&ip_vs_sync_lock);
kthread_stop(sync_master_thread);
sync_master_thread = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 772c3cb4eca1..9b0ef86bb1f7 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -126,6 +126,7 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
.name = "wlc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
.init_service = ip_vs_wlc_init_svc,
.done_service = ip_vs_wlc_done_svc,
.update_service = ip_vs_wlc_update_svc,
@@ -135,7 +136,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
static int __init ip_vs_wlc_init(void)
{
- INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 1d6932d7dc97..0d86a79b87b5 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -212,6 +212,7 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
.name = "wrr",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
.init_service = ip_vs_wrr_init_svc,
.done_service = ip_vs_wrr_done_svc,
.update_service = ip_vs_wrr_update_svc,
@@ -220,7 +221,6 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
static int __init ip_vs_wrr_init(void)
{
- INIT_LIST_HEAD(&ip_vs_wrr_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_wrr_scheduler) ;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 383d17359d01..8e42fbbd5761 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -989,7 +989,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
up->encap_rcv != NULL) {
int ret;
+ bh_unlock_sock(sk);
ret = (*up->encap_rcv)(sk, skb);
+ bh_lock_sock(sk);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
@@ -1092,7 +1094,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
if (skb1) {
int ret = 0;
- bh_lock_sock_nested(sk);
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
ret = udp_queue_rcv_skb(sk, skb1);
else
@@ -1194,7 +1196,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
if (sk != NULL) {
int ret = 0;
- bh_lock_sock_nested(sk);
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
ret = udp_queue_rcv_skb(sk, skb);
else
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5a3e87e4b18f..41b165ffb369 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2187,8 +2187,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
#endif
NLA_PUT_U32(skb, RTA_IIF, iif);
} else if (dst) {
+ struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
struct in6_addr saddr_buf;
- if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
+ if (ipv6_dev_get_saddr(idev ? idev->dev : NULL,
dst, 0, &saddr_buf) == 0)
NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d1477b350f76..a6aecf76a71b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
uh->source, saddr, dif))) {
struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
if (buff) {
- bh_lock_sock_nested(sk2);
+ bh_lock_sock(sk2);
if (!sock_owned_by_user(sk2))
udpv6_queue_rcv_skb(sk2, buff);
else
@@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
bh_unlock_sock(sk2);
}
}
- bh_lock_sock_nested(sk);
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
else
@@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
/* deliver */
- bh_lock_sock_nested(sk);
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
else
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index bdfb77417794..77228f28fa36 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -100,7 +100,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
trans = rxrpc_get_transport(local, peer, GFP_NOIO);
rxrpc_put_peer(peer);
- if (!trans) {
+ if (IS_ERR(trans)) {
_debug("no trans");
ret = -EBUSY;
goto error;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 26c7e1f9a350..9974b3f04f05 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -751,7 +751,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
struct nlattr *tb[TCA_ACT_MAX+1];
struct nlattr *kind;
struct tc_action *a = create_a(0);
- int err = -EINVAL;
+ int err = -ENOMEM;
if (a == NULL) {
printk("tca_action_flush: couldnt create tc_action\n");
@@ -762,7 +762,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
if (!skb) {
printk("tca_action_flush: failed skb alloc\n");
kfree(a);
- return -ENOBUFS;
+ return err;
}
b = skb_tail_pointer(skb);
@@ -790,6 +790,8 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
if (err < 0)
goto nla_put_failure;
+ if (err == 0)
+ goto noflush_out;
nla_nest_end(skb, nest);
@@ -807,6 +809,7 @@ nla_put_failure:
nlmsg_failure:
module_put(a->ops->owner);
err_out:
+noflush_out:
kfree_skb(skb);
kfree(a);
return err;
@@ -824,8 +827,10 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
return ret;
if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
- if (tb[0] != NULL && tb[1] == NULL)
- return tca_action_flush(tb[0], n, pid);
+ if (tb[1] != NULL)
+ return tca_action_flush(tb[1], n, pid);
+ else
+ return -EINVAL;
}
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ba1d121f3127..c25465e5607a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -183,6 +183,21 @@ EXPORT_SYMBOL(unregister_qdisc);
(root qdisc, all its children, children of children etc.)
*/
+struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+{
+ struct Qdisc *q;
+
+ if (!(root->flags & TCQ_F_BUILTIN) &&
+ root->handle == handle)
+ return root;
+
+ list_for_each_entry(q, &root->list, list) {
+ if (q->handle == handle)
+ return q;
+ }
+ return NULL;
+}
+
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
unsigned int i;
@@ -191,16 +206,11 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
- if (!(txq_root->flags & TCQ_F_BUILTIN) &&
- txq_root->handle == handle)
- return txq_root;
-
- list_for_each_entry(q, &txq_root->list, list) {
- if (q->handle == handle)
- return q;
- }
+ q = qdisc_match_from_root(txq_root, handle);
+ if (q)
+ return q;
}
- return NULL;
+ return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -321,7 +331,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
if (!s || tsize != s->tsize || (!tab && tsize > 0))
return ERR_PTR(-EINVAL);
- spin_lock(&qdisc_stab_lock);
+ spin_lock_bh(&qdisc_stab_lock);
list_for_each_entry(stab, &qdisc_stab_list, list) {
if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -329,11 +339,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
continue;
stab->refcnt++;
- spin_unlock(&qdisc_stab_lock);
+ spin_unlock_bh(&qdisc_stab_lock);
return stab;
}
- spin_unlock(&qdisc_stab_lock);
+ spin_unlock_bh(&qdisc_stab_lock);
stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
if (!stab)
@@ -344,9 +354,9 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
if (tsize > 0)
memcpy(stab->data, tab, tsize * sizeof(u16));
- spin_lock(&qdisc_stab_lock);
+ spin_lock_bh(&qdisc_stab_lock);
list_add_tail(&stab->list, &qdisc_stab_list);
- spin_unlock(&qdisc_stab_lock);
+ spin_unlock_bh(&qdisc_stab_lock);
return stab;
}
@@ -356,14 +366,14 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
if (!tab)
return;
- spin_lock(&qdisc_stab_lock);
+ spin_lock_bh(&qdisc_stab_lock);
if (--tab->refcnt == 0) {
list_del(&tab->list);
kfree(tab);
}
- spin_unlock(&qdisc_stab_lock);
+ spin_unlock_bh(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
@@ -908,7 +918,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /* ingress */
- q = dev->rx_queue.qdisc;
+ q = dev->rx_queue.qdisc_sleeping;
}
} else {
struct netdev_queue *dev_queue;
@@ -978,7 +988,7 @@ replay:
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /*ingress */
- q = dev->rx_queue.qdisc;
+ q = dev->rx_queue.qdisc_sleeping;
}
} else {
struct netdev_queue *dev_queue;
@@ -1529,11 +1539,11 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
t = 0;
dev_queue = netdev_get_tx_queue(dev, 0);
- if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+ if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
goto done;
dev_queue = &dev->rx_queue;
- if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+ if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
goto done;
done:
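
[Note] The sch_api.c change factors the root-plus-children handle match into qdisc_match_from_root() so the same lookup also covers the ingress root, and the get/dump paths switch from ->qdisc to ->qdisc_sleeping, the qdisc that stays attached across device deactivation. Roughly how the new lookup composes (a sketch, not the patch's exact qdisc_lookup()):

	/* Sketch: search every tx root, then fall back to the ingress root. */
	static struct Qdisc *my_lookup(struct net_device *dev, u32 handle)
	{
		unsigned int i;
		struct Qdisc *q;

		for (i = 0; i < dev->num_tx_queues; i++) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

			q = qdisc_match_from_root(txq->qdisc_sleeping, handle);
			if (q)
				return q;
		}
		return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
	}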
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7cf83b37459d..468574682caa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -647,7 +647,7 @@ static void dev_deactivate_queue(struct net_device *dev,
}
}
-static bool some_qdisc_is_running(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev, int lock)
{
unsigned int i;
@@ -658,13 +658,14 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
int val;
dev_queue = netdev_get_tx_queue(dev, i);
- q = dev_queue->qdisc;
+ q = dev_queue->qdisc_sleeping;
root_lock = qdisc_lock(q);
if (lock)
spin_lock_bh(root_lock);
- val = test_bit(__QDISC_STATE_RUNNING, &q->state);
+ val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+ test_bit(__QDISC_STATE_SCHED, &q->state));
if (lock)
spin_unlock_bh(root_lock);
@@ -689,14 +690,14 @@ void dev_deactivate(struct net_device *dev)
/* Wait for outstanding qdisc_run calls. */
do {
- while (some_qdisc_is_running(dev, 0))
+ while (some_qdisc_is_busy(dev, 0))
yield();
/*
* Double-check inside queue lock to ensure that all effects
* of the queue run are visible when we return.
*/
- running = some_qdisc_is_running(dev, 1);
+ running = some_qdisc_is_busy(dev, 1);
/*
* The running flag should never be set at this point because
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index be35422711a3..6febd245e62b 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1279,7 +1279,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
/* delete from hash and active; remainder in destroy_class */
qdisc_class_hash_remove(&q->clhash, &cl->common);
- cl->parent->children--;
+ if (cl->parent)
+ cl->parent->children--;
if (cl->prio_activity)
htb_deactivate(q, cl);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0326d3060bc7..0747d8a9232f 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -85,7 +85,7 @@ static struct top_srv topsrv = { 0 };
static u32 htohl(u32 in, int swap)
{
- return swap ? (u32)___constant_swab32(in) : in;
+ return swap ? swab32(in) : in;
}
/**
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index df5b3886c36b..d98ffb75119a 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1277,6 +1277,7 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
r->ifi_flags = dev_get_flags(dev);
r->ifi_change = 0; /* Wireless changes don't affect those flags */
+ NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
/* Add the wireless events in the netlink packet */
NLA_PUT(skb, IFLA_WIRELESS, event_len, event);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 3f964db908a7..ac25b4c0e982 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -112,16 +112,13 @@ error_nolock:
int xfrm_output_resume(struct sk_buff *skb, int err)
{
while (likely((err = xfrm_output_one(skb, err)) == 0)) {
- struct xfrm_state *x;
-
nf_reset(skb);
err = skb->dst->ops->local_out(skb);
if (unlikely(err != 1))
goto out;
- x = skb->dst->xfrm;
- if (!x)
+ if (!skb->dst->xfrm)
return dst_output(skb);
err = nf_hook(skb->dst->ops->family,
diff --git a/security/capability.c b/security/capability.c
index 63d10da515a5..245874819036 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -811,7 +811,8 @@ struct security_operations default_security_ops = {
void security_fixup_ops(struct security_operations *ops)
{
- set_to_cap_if_null(ops, ptrace);
+ set_to_cap_if_null(ops, ptrace_may_access);
+ set_to_cap_if_null(ops, ptrace_traceme);
set_to_cap_if_null(ops, capget);
set_to_cap_if_null(ops, capset_check);
set_to_cap_if_null(ops, capset_set);
diff --git a/security/commoncap.c b/security/commoncap.c
index 4afbece37a08..e4c4b3fc0c04 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -63,14 +63,24 @@ int cap_settime(struct timespec *ts, struct timezone *tz)
return 0;
}
-int cap_ptrace (struct task_struct *parent, struct task_struct *child,
- unsigned int mode)
+int cap_ptrace_may_access(struct task_struct *child, unsigned int mode)
{
/* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */
- if (!cap_issubset(child->cap_permitted, parent->cap_permitted) &&
- !__capable(parent, CAP_SYS_PTRACE))
- return -EPERM;
- return 0;
+ if (cap_issubset(child->cap_permitted, current->cap_permitted))
+ return 0;
+ if (capable(CAP_SYS_PTRACE))
+ return 0;
+ return -EPERM;
+}
+
+int cap_ptrace_traceme(struct task_struct *parent)
+{
+ /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */
+ if (cap_issubset(current->cap_permitted, parent->cap_permitted))
+ return 0;
+ if (has_capability(parent, CAP_SYS_PTRACE))
+ return 0;
+ return -EPERM;
}
int cap_capget (struct task_struct *target, kernel_cap_t *effective,
@@ -534,7 +544,7 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid,
static inline int cap_safe_nice(struct task_struct *p)
{
if (!cap_issubset(p->cap_permitted, current->cap_permitted) &&
- !__capable(current, CAP_SYS_NICE))
+ !capable(CAP_SYS_NICE))
return -EPERM;
return 0;
}
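
[Note] The security hunks split the single ->ptrace hook into ->ptrace_may_access (called with current as the prospective tracer) and ->ptrace_traceme (called in the tracee, with the prospective tracer passed in), which is why the capability checks alternate between capable() on current and has_capability() on the other task. A rough sketch of the two shapes, hypothetical names only:

	/* PTRACE_ATTACH path: current wants to trace child. */
	static int my_ptrace_may_access(struct task_struct *child, unsigned int mode)
	{
		if (capable(CAP_SYS_PTRACE))		/* check the caller, i.e. the tracer */
			return 0;
		return -EPERM;
	}

	/* PTRACE_TRACEME path: current asks parent to trace it. */
	static int my_ptrace_traceme(struct task_struct *parent)
	{
		if (has_capability(parent, CAP_SYS_PTRACE))	/* check the tracer task */
			return 0;
		return -EPERM;
	}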
diff --git a/security/root_plug.c b/security/root_plug.c
index be0ebec2580b..c3f68b5b372d 100644
--- a/security/root_plug.c
+++ b/security/root_plug.c
@@ -72,7 +72,8 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
static struct security_operations rootplug_security_ops = {
/* Use the capability functions for some of the hooks */
- .ptrace = cap_ptrace,
+ .ptrace_may_access = cap_ptrace_may_access,
+ .ptrace_traceme = cap_ptrace_traceme,
.capget = cap_capget,
.capset_check = cap_capset_check,
.capset_set = cap_capset_set,
diff --git a/security/security.c b/security/security.c
index ff7068727757..3a4b4f55b33f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -127,10 +127,14 @@ int register_security(struct security_operations *ops)
/* Security operations */
-int security_ptrace(struct task_struct *parent, struct task_struct *child,
- unsigned int mode)
+int security_ptrace_may_access(struct task_struct *child, unsigned int mode)
{
- return security_ops->ptrace(parent, child, mode);
+ return security_ops->ptrace_may_access(child, mode);
+}
+
+int security_ptrace_traceme(struct task_struct *parent)
+{
+ return security_ops->ptrace_traceme(parent);
}
int security_capget(struct task_struct *target,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 3ae9bec5a508..03fc6a81ae32 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1738,24 +1738,34 @@ static inline u32 file_to_av(struct file *file)
/* Hook functions begin here. */
-static int selinux_ptrace(struct task_struct *parent,
- struct task_struct *child,
- unsigned int mode)
+static int selinux_ptrace_may_access(struct task_struct *child,
+ unsigned int mode)
{
int rc;
- rc = secondary_ops->ptrace(parent, child, mode);
+ rc = secondary_ops->ptrace_may_access(child, mode);
if (rc)
return rc;
if (mode == PTRACE_MODE_READ) {
- struct task_security_struct *tsec = parent->security;
+ struct task_security_struct *tsec = current->security;
struct task_security_struct *csec = child->security;
return avc_has_perm(tsec->sid, csec->sid,
SECCLASS_FILE, FILE__READ, NULL);
}
- return task_has_perm(parent, child, PROCESS__PTRACE);
+ return task_has_perm(current, child, PROCESS__PTRACE);
+}
+
+static int selinux_ptrace_traceme(struct task_struct *parent)
+{
+ int rc;
+
+ rc = secondary_ops->ptrace_traceme(parent);
+ if (rc)
+ return rc;
+
+ return task_has_perm(parent, current, PROCESS__PTRACE);
}
static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
@@ -5346,7 +5356,8 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
static struct security_operations selinux_ops = {
.name = "selinux",
- .ptrace = selinux_ptrace,
+ .ptrace_may_access = selinux_ptrace_may_access,
+ .ptrace_traceme = selinux_ptrace_traceme,
.capget = selinux_capget,
.capset_check = selinux_capset_check,
.capset_set = selinux_capset_set,
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 1b40e558f983..87d75417ea93 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -87,27 +87,46 @@ struct inode_smack *new_inode_smack(char *smack)
*/
/**
- * smack_ptrace - Smack approval on ptrace
- * @ptp: parent task pointer
+ * smack_ptrace_may_access - Smack approval on PTRACE_ATTACH
* @ctp: child task pointer
*
* Returns 0 if access is OK, an error code otherwise
*
* Do the capability checks, and require read and write.
*/
-static int smack_ptrace(struct task_struct *ptp, struct task_struct *ctp,
- unsigned int mode)
+static int smack_ptrace_may_access(struct task_struct *ctp, unsigned int mode)
{
int rc;
- rc = cap_ptrace(ptp, ctp, mode);
+ rc = cap_ptrace_may_access(ctp, mode);
if (rc != 0)
return rc;
- rc = smk_access(ptp->security, ctp->security, MAY_READWRITE);
- if (rc != 0 && __capable(ptp, CAP_MAC_OVERRIDE))
+ rc = smk_access(current->security, ctp->security, MAY_READWRITE);
+ if (rc != 0 && capable(CAP_MAC_OVERRIDE))
return 0;
+ return rc;
+}
+
+/**
+ * smack_ptrace_traceme - Smack approval on PTRACE_TRACEME
+ * @ptp: parent task pointer
+ *
+ * Returns 0 if access is OK, an error code otherwise
+ *
+ * Do the capability checks, and require read and write.
+ */
+static int smack_ptrace_traceme(struct task_struct *ptp)
+{
+ int rc;
+
+ rc = cap_ptrace_traceme(ptp);
+ if (rc != 0)
+ return rc;
+ rc = smk_access(ptp->security, current->security, MAY_READWRITE);
+ if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE))
+ return 0;
return rc;
}
@@ -923,7 +942,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
*/
file = container_of(fown, struct file, f_owner);
rc = smk_access(file->f_security, tsk->security, MAY_WRITE);
- if (rc != 0 && __capable(tsk, CAP_MAC_OVERRIDE))
+ if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
return 0;
return rc;
}
@@ -1164,12 +1183,12 @@ static int smack_task_wait(struct task_struct *p)
* account for the smack labels having gotten to
* be different in the first place.
*
- * This breaks the strict subjet/object access
+ * This breaks the strict subject/object access
* control ideal, taking the object's privilege
* state into account in the decision as well as
* the smack value.
*/
- if (capable(CAP_MAC_OVERRIDE) || __capable(p, CAP_MAC_OVERRIDE))
+ if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE))
return 0;
return rc;
@@ -2016,9 +2035,6 @@ static int smack_setprocattr(struct task_struct *p, char *name,
{
char *newsmack;
- if (!__capable(p, CAP_MAC_ADMIN))
- return -EPERM;
-
/*
* Changing another process' Smack value is too dangerous
* and supports no sane use case.
@@ -2026,6 +2042,9 @@ static int smack_setprocattr(struct task_struct *p, char *name,
if (p != current)
return -EPERM;
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
if (value == NULL || size == 0 || size >= SMK_LABELLEN)
return -EINVAL;
@@ -2552,7 +2571,8 @@ static void smack_release_secctx(char *secdata, u32 seclen)
struct security_operations smack_ops = {
.name = "smack",
- .ptrace = smack_ptrace,
+ .ptrace_may_access = smack_ptrace_may_access,
+ .ptrace_traceme = smack_ptrace_traceme,
.capget = cap_capget,
.capset_check = cap_capset_check,
.capset_set = cap_capset_set,
@@ -2729,4 +2749,3 @@ static __init int smack_init(void)
* all processes and objects when they are created.
*/
security_initcall(smack_init);
-
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index f7d95b224a98..31f52d3fc21f 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -845,7 +845,7 @@ config SND_VIRTUOSO
select SND_OXYGEN_LIB
help
Say Y here to include support for sound cards based on the
- Asus AV100/AV200 chips, i.e., Xonar D2, DX and D2X.
+ Asus AV100/AV200 chips, i.e., Xonar D1, DX, D2 and D2X.
To compile this driver as a module, choose M here: the module
will be called snd-virtuoso.
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index ef9f072b47fc..a73d6ca0a906 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -101,6 +101,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
"{Intel, ICH8},"
"{Intel, ICH9},"
"{Intel, ICH10},"
+ "{Intel, PCH},"
"{Intel, SCH},"
"{ATI, SB450},"
"{ATI, SB600},"
@@ -2263,6 +2264,8 @@ static struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x8086, 0x293f), .driver_data = AZX_DRIVER_ICH },
{ PCI_DEVICE(0x8086, 0x3a3e), .driver_data = AZX_DRIVER_ICH },
{ PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
+ /* PCH */
+ { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
/* ATI SB 450/600 */
@@ -2272,6 +2275,7 @@ static struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI },
{ PCI_DEVICE(0x1002, 0x7919), .driver_data = AZX_DRIVER_ATIHDMI },
{ PCI_DEVICE(0x1002, 0x960f), .driver_data = AZX_DRIVER_ATIHDMI },
+ { PCI_DEVICE(0x1002, 0x970f), .driver_data = AZX_DRIVER_ATIHDMI },
{ PCI_DEVICE(0x1002, 0xaa00), .driver_data = AZX_DRIVER_ATIHDMI },
{ PCI_DEVICE(0x1002, 0xaa08), .driver_data = AZX_DRIVER_ATIHDMI },
{ PCI_DEVICE(0x1002, 0xaa10), .driver_data = AZX_DRIVER_ATIHDMI },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index add4e87e0b20..b80e725432f0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6437,6 +6437,39 @@ static void alc882_auto_init_analog_input(struct hda_codec *codec)
}
}
+static void alc882_auto_init_input_src(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ const struct hda_input_mux *imux = spec->input_mux;
+ int c;
+
+ for (c = 0; c < spec->num_adc_nids; c++) {
+ hda_nid_t conn_list[HDA_MAX_NUM_INPUTS];
+ hda_nid_t nid = spec->capsrc_nids[c];
+ int conns, mute, idx, item;
+
+ conns = snd_hda_get_connections(codec, nid, conn_list,
+ ARRAY_SIZE(conn_list));
+ if (conns < 0)
+ continue;
+ for (idx = 0; idx < conns; idx++) {
+ /* if the current connection is the selected one,
+ * unmute it as default - otherwise mute it
+ */
+ mute = AMP_IN_MUTE(idx);
+ for (item = 0; item < imux->num_items; item++) {
+ if (imux->items[item].index == idx) {
+ if (spec->cur_mux[c] == item)
+ mute = AMP_IN_UNMUTE(idx);
+ break;
+ }
+ }
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, mute);
+ }
+ }
+}
+
/* add mic boosts if needed */
static int alc_auto_add_mic_boost(struct hda_codec *codec)
{
@@ -6491,6 +6524,7 @@ static void alc882_auto_init(struct hda_codec *codec)
alc882_auto_init_multi_out(codec);
alc882_auto_init_hp_out(codec);
alc882_auto_init_analog_input(codec);
+ alc882_auto_init_input_src(codec);
if (spec->unsol_event)
alc_sku_automute(codec);
}
@@ -8285,6 +8319,8 @@ static void alc883_auto_init_analog_input(struct hda_codec *codec)
}
}
+#define alc883_auto_init_input_src alc882_auto_init_input_src
+
/* almost identical with ALC880 parser... */
static int alc883_parse_auto_config(struct hda_codec *codec)
{
@@ -8315,6 +8351,7 @@ static void alc883_auto_init(struct hda_codec *codec)
alc883_auto_init_multi_out(codec);
alc883_auto_init_hp_out(codec);
alc883_auto_init_analog_input(codec);
+ alc883_auto_init_input_src(codec);
if (spec->unsol_event)
alc_sku_automute(codec);
}
@@ -9663,6 +9700,7 @@ static int alc262_parse_auto_config(struct hda_codec *codec)
#define alc262_auto_init_multi_out alc882_auto_init_multi_out
#define alc262_auto_init_hp_out alc882_auto_init_hp_out
#define alc262_auto_init_analog_input alc882_auto_init_analog_input
+#define alc262_auto_init_input_src alc882_auto_init_input_src
/* init callback for auto-configuration model -- overriding the default init */
@@ -9672,6 +9710,7 @@ static void alc262_auto_init(struct hda_codec *codec)
alc262_auto_init_multi_out(codec);
alc262_auto_init_hp_out(codec);
alc262_auto_init_analog_input(codec);
+ alc262_auto_init_input_src(codec);
if (spec->unsol_event)
alc_sku_automute(codec);
}
@@ -13330,6 +13369,8 @@ static void alc861vd_auto_init_analog_input(struct hda_codec *codec)
}
}
+#define alc861vd_auto_init_input_src alc882_auto_init_input_src
+
#define alc861vd_idx_to_mixer_vol(nid) ((nid) + 0x02)
#define alc861vd_idx_to_mixer_switch(nid) ((nid) + 0x0c)
@@ -13512,6 +13553,7 @@ static void alc861vd_auto_init(struct hda_codec *codec)
alc861vd_auto_init_multi_out(codec);
alc861vd_auto_init_hp_out(codec);
alc861vd_auto_init_analog_input(codec);
+ alc861vd_auto_init_input_src(codec);
if (spec->unsol_event)
alc_sku_automute(codec);
}
@@ -14677,6 +14719,8 @@ static void alc662_auto_init_analog_input(struct hda_codec *codec)
}
}
+#define alc662_auto_init_input_src alc882_auto_init_input_src
+
static int alc662_parse_auto_config(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -14733,6 +14777,7 @@ static void alc662_auto_init(struct hda_codec *codec)
alc662_auto_init_multi_out(codec);
alc662_auto_init_hp_out(codec);
alc662_auto_init_analog_input(codec);
+ alc662_auto_init_input_src(codec);
if (spec->unsol_event)
alc_sku_automute(codec);
}
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index 9a2c16bf94e0..01d7b75f9182 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -36,15 +36,15 @@
*/
/*
- * Xonar DX
- * --------
+ * Xonar D1/DX
+ * -----------
*
* CMI8788:
*
* I²C <-> CS4398 (front)
* <-> CS4362A (surround, center/LFE, back)
*
- * GPI 0 <- external power present
+ * GPI 0 <- external power present (DX only)
*
* GPIO 0 -> enable output to speakers
* GPIO 1 -> enable front panel I/O
@@ -96,6 +96,7 @@ MODULE_PARM_DESC(enable, "enable card");
enum {
MODEL_D2,
MODEL_D2X,
+ MODEL_D1,
MODEL_DX,
};
@@ -103,6 +104,7 @@ static struct pci_device_id xonar_ids[] __devinitdata = {
{ OXYGEN_PCI_SUBID(0x1043, 0x8269), .driver_data = MODEL_D2 },
{ OXYGEN_PCI_SUBID(0x1043, 0x8275), .driver_data = MODEL_DX },
{ OXYGEN_PCI_SUBID(0x1043, 0x82b7), .driver_data = MODEL_D2X },
+ { OXYGEN_PCI_SUBID(0x1043, 0x834f), .driver_data = MODEL_D1 },
{ }
};
MODULE_DEVICE_TABLE(pci, xonar_ids);
@@ -313,15 +315,12 @@ static void cs43xx_init(struct oxygen *chip)
cs4362a_write(chip, 0x01, CS4362A_CPEN);
}
-static void xonar_dx_init(struct oxygen *chip)
+static void xonar_d1_init(struct oxygen *chip)
{
struct xonar_data *data = chip->model_data;
data->anti_pop_delay = 800;
data->output_enable_bit = GPIO_DX_OUTPUT_ENABLE;
- data->ext_power_reg = OXYGEN_GPI_DATA;
- data->ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
- data->ext_power_bit = GPI_DX_EXT_POWER;
data->cs4398_fm = CS4398_FM_SINGLE | CS4398_DEM_NONE | CS4398_DIF_LJUST;
data->cs4362a_fm = CS4362A_FM_SINGLE |
CS4362A_ATAPI_B_R | CS4362A_ATAPI_A_L;
@@ -345,6 +344,16 @@ static void xonar_dx_init(struct oxygen *chip)
snd_component_add(chip->card, "CS5361");
}
+static void xonar_dx_init(struct oxygen *chip)
+{
+ struct xonar_data *data = chip->model_data;
+
+ data->ext_power_reg = OXYGEN_GPI_DATA;
+ data->ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
+ data->ext_power_bit = GPI_DX_EXT_POWER;
+ xonar_d1_init(chip);
+}
+
static void xonar_cleanup(struct oxygen *chip)
{
struct xonar_data *data = chip->model_data;
@@ -352,7 +361,7 @@ static void xonar_cleanup(struct oxygen *chip)
oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, data->output_enable_bit);
}
-static void xonar_dx_cleanup(struct oxygen *chip)
+static void xonar_d1_cleanup(struct oxygen *chip)
{
xonar_cleanup(chip);
cs4362a_write(chip, 0x01, CS4362A_PDN | CS4362A_CPEN);
@@ -365,7 +374,7 @@ static void xonar_d2_resume(struct oxygen *chip)
xonar_enable_output(chip);
}
-static void xonar_dx_resume(struct oxygen *chip)
+static void xonar_d1_resume(struct oxygen *chip)
{
cs43xx_init(chip);
xonar_enable_output(chip);
@@ -513,7 +522,7 @@ static const struct snd_kcontrol_new front_panel_switch = {
.put = front_panel_put,
};
-static void xonar_dx_ac97_switch(struct oxygen *chip,
+static void xonar_d1_ac97_switch(struct oxygen *chip,
unsigned int reg, unsigned int mute)
{
if (reg == AC97_LINE) {
@@ -536,7 +545,7 @@ static int xonar_d2_control_filter(struct snd_kcontrol_new *template)
return 0;
}
-static int xonar_dx_control_filter(struct snd_kcontrol_new *template)
+static int xonar_d1_control_filter(struct snd_kcontrol_new *template)
{
if (!strncmp(template->name, "CD Capture ", 11))
return 1; /* no CD input */
@@ -548,7 +557,7 @@ static int xonar_mixer_init(struct oxygen *chip)
return snd_ctl_add(chip->card, snd_ctl_new1(&alt_switch, chip));
}
-static int xonar_dx_mixer_init(struct oxygen *chip)
+static int xonar_d1_mixer_init(struct oxygen *chip)
{
return snd_ctl_add(chip->card, snd_ctl_new1(&front_panel_switch, chip));
}
@@ -615,23 +624,51 @@ static const struct oxygen_model xonar_models[] = {
.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
},
+ [MODEL_D1] = {
+ .shortname = "Xonar D1",
+ .longname = "Asus Virtuoso 100",
+ .chip = "AV200",
+ .owner = THIS_MODULE,
+ .init = xonar_d1_init,
+ .control_filter = xonar_d1_control_filter,
+ .mixer_init = xonar_d1_mixer_init,
+ .cleanup = xonar_d1_cleanup,
+ .suspend = xonar_d1_cleanup,
+ .resume = xonar_d1_resume,
+ .set_dac_params = set_cs43xx_params,
+ .set_adc_params = set_cs53x1_params,
+ .update_dac_volume = update_cs43xx_volume,
+ .update_dac_mute = update_cs43xx_mute,
+ .ac97_switch = xonar_d1_ac97_switch,
+ .dac_tlv = cs4362a_db_scale,
+ .model_data_size = sizeof(struct xonar_data),
+ .pcm_dev_cfg = PLAYBACK_0_TO_I2S |
+ PLAYBACK_1_TO_SPDIF |
+ CAPTURE_0_FROM_I2S_2,
+ .dac_channels = 8,
+ .dac_volume_min = 0,
+ .dac_volume_max = 127,
+ .function_flags = OXYGEN_FUNCTION_2WIRE,
+ .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+ .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+ },
[MODEL_DX] = {
.shortname = "Xonar DX",
.longname = "Asus Virtuoso 100",
.chip = "AV200",
.owner = THIS_MODULE,
.init = xonar_dx_init,
- .control_filter = xonar_dx_control_filter,
- .mixer_init = xonar_dx_mixer_init,
- .cleanup = xonar_dx_cleanup,
- .suspend = xonar_dx_cleanup,
- .resume = xonar_dx_resume,
+ .control_filter = xonar_d1_control_filter,
+ .mixer_init = xonar_d1_mixer_init,
+ .cleanup = xonar_d1_cleanup,
+ .suspend = xonar_d1_cleanup,
+ .resume = xonar_d1_resume,
.set_dac_params = set_cs43xx_params,
.set_adc_params = set_cs53x1_params,
.update_dac_volume = update_cs43xx_volume,
.update_dac_mute = update_cs43xx_mute,
.gpio_changed = xonar_gpio_changed,
- .ac97_switch = xonar_dx_ac97_switch,
+ .ac97_switch = xonar_d1_ac97_switch,
.dac_tlv = cs4362a_db_scale,
.model_data_size = sizeof(struct xonar_data),
.pcm_dev_cfg = PLAYBACK_0_TO_I2S |
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index e23cb09f0d14..c6a8edf302ad 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -348,8 +348,9 @@ static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("ROUT1"),
SND_SOC_DAPM_OUTPUT("LOUT2"),
SND_SOC_DAPM_OUTPUT("ROUT2"),
- SND_SOC_DAPM_OUTPUT("MONO"),
+ SND_SOC_DAPM_OUTPUT("MONO1"),
SND_SOC_DAPM_OUTPUT("OUT3"),
+ SND_SOC_DAPM_OUTPUT("VREF"),
SND_SOC_DAPM_INPUT("LINPUT1"),
SND_SOC_DAPM_INPUT("LINPUT2"),
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 3ecce5168e94..e44153fa38de 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -82,7 +82,7 @@ static const u16 wm8990_reg[] = {
0x0003, /* R35 - ClassD1 */
0x0000, /* R36 */
0x0100, /* R37 - ClassD3 */
- 0x0000, /* R38 */
+ 0x0079, /* R38 - ClassD4 */
0x0000, /* R39 - Input Mixer1 */
0x0000, /* R40 - Input Mixer2 */
0x0000, /* R41 - Input Mixer3 */
@@ -311,11 +311,15 @@ SOC_SINGLE("Speaker Mode Switch", WM8990_CLASSD1,
WM8990_CDMODE_BIT, 1, 0),
SOC_SINGLE("Speaker Output Attenuation Volume", WM8990_SPEAKER_VOLUME,
- WM8990_SPKVOL_SHIFT, WM8990_SPKVOL_MASK, 0),
+ WM8990_SPKATTN_SHIFT, WM8990_SPKATTN_MASK, 0),
SOC_SINGLE("Speaker DC Boost Volume", WM8990_CLASSD3,
WM8990_DCGAIN_SHIFT, WM8990_DCGAIN_MASK, 0),
SOC_SINGLE("Speaker AC Boost Volume", WM8990_CLASSD3,
WM8990_ACGAIN_SHIFT, WM8990_ACGAIN_MASK, 0),
+SOC_SINGLE_TLV("Speaker Volume", WM8990_CLASSD4,
+ WM8990_SPKVOL_SHIFT, WM8990_SPKVOL_MASK, 0, out_pga_tlv),
+SOC_SINGLE("Speaker ZC Switch", WM8990_CLASSD4,
+ WM8990_SPKZC_SHIFT, WM8990_SPKZC_MASK, 0),
SOC_WM899X_OUTPGA_SINGLE_R_TLV("Left DAC Digital Volume",
WM8990_LEFT_DAC_DIGITAL_VOLUME,
@@ -920,7 +924,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"SPKMIX", "SPKMIX Left Mixer PGA Switch", "LOPGA"},
{"SPKMIX", "SPKMIX Right Mixer PGA Switch", "ROPGA"},
{"SPKMIX", "SPKMIX Right DAC Switch", "Right DAC"},
- {"SPKMIX", "SPKMIX Left DAC Switch", "Right DAC"},
+ {"SPKMIX", "SPKMIX Left DAC Switch", "Left DAC"},
/* LONMIX */
{"LONMIX", "LONMIX Left Mixer PGA Switch", "LOPGA"},
diff --git a/sound/soc/codecs/wm8990.h b/sound/soc/codecs/wm8990.h
index 6bea57485283..0a08325d5443 100644
--- a/sound/soc/codecs/wm8990.h
+++ b/sound/soc/codecs/wm8990.h
@@ -54,6 +54,7 @@
#define WM8990_SPEAKER_VOLUME 0x22
#define WM8990_CLASSD1 0x23
#define WM8990_CLASSD3 0x25
+#define WM8990_CLASSD4 0x26
#define WM8990_INPUT_MIXER1 0x27
#define WM8990_INPUT_MIXER2 0x28
#define WM8990_INPUT_MIXER3 0x29
@@ -528,8 +529,8 @@
/*
* R34 (0x22) - Speaker Volume
*/
-#define WM8990_SPKVOL_MASK 0x0003 /* SPKVOL - [1:0] */
-#define WM8990_SPKVOL_SHIFT 0
+#define WM8990_SPKATTN_MASK 0x0003 /* SPKATTN - [1:0] */
+#define WM8990_SPKATTN_SHIFT 0
/*
* R35 (0x23) - ClassD1
@@ -544,6 +545,15 @@
#define WM8990_DCGAIN_SHIFT 3
#define WM8990_ACGAIN_MASK 0x0007 /* ACGAIN - [2:0] */
#define WM8990_ACGAIN_SHIFT 0
+
+/*
+ * R38 (0x26) - ClassD4
+ */
+#define WM8990_SPKZC_MASK 0x0001 /* SPKZC */
+#define WM8990_SPKZC_SHIFT 7 /* SPKZC */
+#define WM8990_SPKVOL_MASK 0x007F /* SPKVOL - [6:0] */
+#define WM8990_SPKVOL_SHIFT 0 /* SPKVOL - [6:0] */
+
/*
* R39 (0x27) - Input Mixer1
*/
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index eefc25b83514..37cb768fc933 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -297,7 +297,7 @@ static int spitz_wm8750_init(struct snd_soc_codec *codec)
snd_soc_dapm_disable_pin(codec, "LINPUT3");
snd_soc_dapm_disable_pin(codec, "RINPUT3");
snd_soc_dapm_disable_pin(codec, "OUT3");
- snd_soc_dapm_disable_pin(codec, "MONO");
+ snd_soc_dapm_disable_pin(codec, "MONO1");
/* Add spitz specific controls */
for (i = 0; i < ARRAY_SIZE(wm8750_spitz_controls); i++) {